diff --git src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 6bc2cf2..44bbec8 100644
--- src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -25,7 +25,8 @@
 ServerName rootLocation = null;
 ServerName metaLocation = null;
 List servers = null;
 Set deadServers = null;
-boolean showAppendWarning = false;
+boolean showSyncWarning = false;
+boolean showHflushWarning = false;
 String filter = "general";
 String format = "html";
@@ -81,9 +82,9 @@ org.apache.hadoop.hbase.HBaseConfiguration;
 for details.
-<%if showAppendWarning %>
+<%if showSyncWarning && showHflushWarning %>
-    You are currently running the HMaster without HDFS append support enabled.
+    You are currently running the HMaster without either HDFS sync or hflush support enabled.
     This may result in data loss. Please see the HBase wiki for details.
diff --git src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
index a0aaaf4..17d1219 100644
--- src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
+++ src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
@@ -67,7 +67,8 @@ public class MasterStatusServlet extends HttpServlet {
     response.setContentType("text/html");
     MasterStatusTmpl tmpl = new MasterStatusTmpl()
       .setFrags(frags)
-      .setShowAppendWarning(shouldShowAppendWarning(conf))
+      .setShowSyncWarning(shouldShowSyncWarning(conf))
+      .setShowHflushWarning(shouldShowHflushWarning(conf))
       .setRootLocation(rootLocation)
       .setMetaLocation(metaLocation)
       .setServers(servers)
@@ -100,11 +101,20 @@
     }
   }
 
-  static boolean shouldShowAppendWarning(Configuration conf) {
+  static boolean shouldShowSyncWarning(Configuration conf) {
     try {
-      return !FSUtils.isAppendSupported(conf) && FSUtils.isHDFS(conf);
+      return !FSUtils.isSyncSupported() && FSUtils.isHDFS(conf);
     } catch (IOException e) {
-      LOG.warn("Unable to determine if append is supported", e);
+      LOG.warn("Unable to determine if sync is supported", e);
+      return false;
+    }
+  }
+
+  static boolean shouldShowHflushWarning(Configuration conf) {
+    try {
+      return !FSUtils.isHflushSupported() && FSUtils.isHDFS(conf);
+    } catch (IOException e) {
+      LOG.warn("Unable to determine if hflush is supported", e);
       return false;
     }
   }
diff --git src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
index 302a3b1..867802f 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
@@ -57,8 +57,8 @@
   public void recoverFileLease(final FileSystem fs, final Path p, Configuration conf)
   throws IOException{
-    if (!isAppendSupported(conf)) {
-      LOG.warn("Running on HDFS without append enabled may result in data loss");
+    if (!isSyncSupported() && !isHflushSupported()) {
+      LOG.warn("Running on HDFS without sync or hflush enabled may result in data loss");
       return;
     }
     // lease recovery not needed for local file system case.
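Note on the warning semantics above: the template renders the data-loss banner only when showSyncWarning && showHflushWarning, i.e. when neither capability probe succeeds, and each servlet helper additionally requires FSUtils.isHDFS(conf), so local-filesystem runs stay quiet. A minimal sketch of the combined check, assuming the FSUtils methods introduced by this patch (the DataLossCheck class itself is hypothetical, for illustration only):

// Hypothetical illustration, not part of this patch. Assumes the
// FSUtils.isSyncSupported()/isHflushSupported() API added above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.FSUtils;

public class DataLossCheck {
  /** True only when running on HDFS with neither sync nor hflush available. */
  public static boolean shouldWarn(Configuration conf) {
    try {
      return FSUtils.isHDFS(conf)
          && !FSUtils.isSyncSupported()
          && !FSUtils.isHflushSupported();
    } catch (IOException e) {
      // Mirrors the servlet helpers: if HDFS detection fails, suppress
      // the warning rather than break the status page.
      return false;
    }
  }
}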
diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 3d35d3e..bbd6846 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -845,36 +845,37 @@
   }
 
   /**
-   * Heuristic to determine whether is safe or not to open a file for append
-   * Looks both for dfs.support.append and use reflection to search
-   * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush()
-   * @param conf
-   * @return True if append support
+   * Heuristic to determine whether sync is supported.
+   * Uses reflection to search for SequenceFile.Writer.syncFs().
+   * @return True if sync is supported
    */
-  public static boolean isAppendSupported(final Configuration conf) {
-    boolean append = conf.getBoolean("dfs.support.append", false);
-    if (append) {
-      try {
-        // TODO: The implementation that comes back when we do a createWriter
-        // may not be using SequenceFile so the below is not a definitive test.
-        // Will do for now (hdfs-200).
-        SequenceFile.Writer.class.getMethod("syncFs", new Class []{});
-        append = true;
-      } catch (SecurityException e) {
-      } catch (NoSuchMethodException e) {
-        append = false;
-      }
+  public static boolean isSyncSupported() {
+    boolean sync = true;
+    try {
+      // TODO: The implementation that comes back when we do a createWriter
+      // may not be using SequenceFile so the below is not a definitive test.
+      // Will do for now (hdfs-200).
+      SequenceFile.Writer.class.getMethod("syncFs", new Class []{});
+    } catch (SecurityException e) {
+    } catch (NoSuchMethodException e) {
+      sync = false;
     }
-    if (!append) {
-      // Look for the 0.21, 0.22, new-style append evidence.
-      try {
-        FSDataOutputStream.class.getMethod("hflush", new Class []{});
-        append = true;
-      } catch (NoSuchMethodException e) {
-        append = false;
-      }
+    return sync;
+  }
+
+  /**
+   * Heuristic to determine whether hflush is supported.
+   * Uses reflection to search for FSDataOutputStream.hflush().
+   * @return True if hflush is supported
+   */
+  public static boolean isHflushSupported() {
+    boolean hflush = true;
+    try {
+      FSDataOutputStream.class.getMethod("hflush", new Class []{});
+    } catch (NoSuchMethodException e) {
+      hflush = false;
     }
-    return append;
+    return hflush;
   }
 
   /**
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
index 3ea0a3b..5c602b3 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
@@ -139,9 +139,9 @@
     HLog log = server.getWAL();
     assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
-    // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test",
-        FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
+    // don't run this test without sync or hflush support
+    assertTrue("Need sync or hflush support for this test",
+        FSUtils.isSyncSupported() || FSUtils.isHflushSupported());
 
     Put p = new Put(Bytes.toBytes("row2001"));
     p.add(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2001));
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index cb7efc3..3f7c41c 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -317,9 +317,9 @@ public class TestLogRolling {
     this.log = server.getWAL();
     assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
-    // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test", FSUtils
-        .isAppendSupported(TEST_UTIL.getConfiguration()));
+    // don't run this test without sync or hflush support
+    assertTrue("Need sync or hflush support for this test",
+        FSUtils.isSyncSupported() || FSUtils.isHflushSupported());
 
     // add up the datanode count, to ensure proper replication when we kill 1
     // This function is synchronous; when it returns, the dfs cluster is active
@@ -446,9 +446,9 @@
     });
     assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
-    // don't run this test without append support (HDFS-200 & HDFS-142)
-    assertTrue("Need append support for this test", FSUtils
-        .isAppendSupported(TEST_UTIL.getConfiguration()));
+    // don't run this test without sync or hflush support
+    assertTrue("Need sync or hflush support for this test",
+        FSUtils.isSyncSupported() || FSUtils.isHflushSupported());
 
     writeData(table, 1002);
diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 339a120..985d002 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -48,14 +48,12 @@
 import org.junit.experimental.categories.Category;
 
 public class TestFSUtils {
   @Test public void testIsHDFS() throws Exception {
     HBaseTestingUtility htu = new HBaseTestingUtility();
-    htu.getConfiguration().setBoolean("dfs.support.append", false);
     assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
-    htu.getConfiguration().setBoolean("dfs.support.append", true);
+    assertTrue(FSUtils.isSyncSupported() || FSUtils.isHflushSupported());
     MiniDFSCluster cluster = null;
     try {
       cluster = htu.startMiniDFSCluster(1);
       assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
-      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
     } finally {
       if (cluster != null) cluster.shutdown();
     }
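The two FSUtils heuristics above share one technique: look a method up by name via reflection and never invoke it, so the code compiles and runs against Hadoop versions both with and without the feature. A standalone sketch of that probe, using the same class and method names as the patch (the CapabilityProbe helper is illustrative only, not part of this change):

// Illustrative sketch of the reflection probe used by isSyncSupported()
// and isHflushSupported(); CapabilityProbe is not part of this patch.
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.SequenceFile;

public class CapabilityProbe {
  /** Returns true if clazz exposes a public no-arg method with this name. */
  static boolean hasMethod(Class<?> clazz, String name) {
    try {
      clazz.getMethod(name);  // lookup only, never invoked
      return true;
    } catch (NoSuchMethodException e) {
      return false;
    } catch (SecurityException e) {
      // The patch treats an unanswerable syncFs probe as "supported";
      // this sketch keeps the same default.
      return true;
    }
  }

  public static void main(String[] args) {
    System.out.println("syncFs supported: "
        + hasMethod(SequenceFile.Writer.class, "syncFs"));
    System.out.println("hflush supported: "
        + hasMethod(FSDataOutputStream.class, "hflush"));
  }
}

The test changes rely on the same probes; assertTrue fails a test outright when neither capability is present, whereas org.junit.Assume.assumeTrue would skip it instead, which may be worth considering for environments where the methods are legitimately absent.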