Index: src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java	(revision 0)
+++ src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java	(revision 0)
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Test;
+
+/**
+ * Test {@link FSUtils}.
+ */
+public class TestFSUtils {
+  @Test public void testIsHDFS() throws Exception {
+    HBaseTestingUtility htu = new HBaseTestingUtility();
+    htu.getConfiguration().setBoolean("dfs.support.append", false);
+    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
+    assertFalse(FSUtils.isAppendSupported(htu.getConfiguration()));
+    htu.getConfiguration().setBoolean("dfs.support.append", true);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = htu.startMiniDFSCluster(1);
+      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
+      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
+    } finally {
+      if (cluster != null) cluster.shutdown();
+    }
+  }
+}
Index: src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java	(revision 991708)
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java	(working copy)
@@ -590,6 +590,16 @@
     return append;
   }
 
+  /**
+   * @param conf
+   * @return True if this filesystem's scheme is 'hdfs'.
+   * @throws IOException
+   */
+  public static boolean isHDFS(final Configuration conf) throws IOException {
+    FileSystem fs = FileSystem.get(conf);
+    String scheme = fs.getUri().getScheme();
+    return scheme.equalsIgnoreCase("hdfs");
+  }
+
   /*
    * Recover file lease. Used when a file might be suspect to be had been left open by another process. p
Index: src/main/resources/hbase-webapps/master/master.jsp
===================================================================
--- src/main/resources/hbase-webapps/master/master.jsp	(revision 991708)
+++ src/main/resources/hbase-webapps/master/master.jsp	(working copy)
@@ -47,7 +47,7 @@
         for details.
         <% } %>
 
-        <% if (!FSUtils.isAppendSupported(conf)) { %>
+        <% if (!FSUtils.isAppendSupported(conf) && FSUtils.isHDFS(conf)) { %>
         You are currently running the HMaster without HDFS append support enabled.
         This may result in data loss.
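Illustrative usage (not part of the patch): a minimal sketch of how the new FSUtils.isHDFS helper pairs with the existing FSUtils.isAppendSupported check, mirroring the guard added to master.jsp above. The AppendWarningCheck class and its shouldWarn method are hypothetical names used only for this example.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.FSUtils;

public class AppendWarningCheck {
  /**
   * Returns true when the append warning should be shown: the configured
   * filesystem is HDFS but dfs.support.append is not enabled.
   */
  public static boolean shouldWarn(final Configuration conf) throws IOException {
    return FSUtils.isHDFS(conf) && !FSUtils.isAppendSupported(conf);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    if (shouldWarn(conf)) {
      System.out.println("HDFS append support is not enabled; this may result in data loss.");
    }
  }
}

With this shape, running against a local (non-HDFS) filesystem produces no warning regardless of the append setting, which is the behavior change the master.jsp hunk introduces.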