commit 65b7c00a8efab4d50d664581274c0ba7254cc94d
Author: Yu Li
Date:   Thu Jun 7 14:12:55 2018 +0800

    HBASE-20691 Change the default WAL storage policy back to "NONE"

    This reverts commit 564c193d61cd1f92688a08a3af6d55ce4c4636d8 and adds
    more doc about why we choose "NONE" as the default.

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 9241682..c16fdb5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1072,7 +1072,14 @@ public final class HConstants {
    * Valid values are: HOT, COLD, WARM, ALL_SSD, ONE_SSD, LAZY_PERSIST
    * See http://hadoop.apache.org/docs/r2.7.3/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html */
   public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
-  public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";
+  /** "NONE" is not a valid storage policy and means we defer the policy to HDFS */
+  public static final String DEFER_TO_HDFS_STORAGE_POLICY = "NONE";
+  /**
+   * In our current implementation we will bypass the user setting if it's the same as the
+   * default policy, so we intentionally choose an invalid one (defer to HDFS by default).
+   * @see HBASE-20691
+   */
+  public static final String DEFAULT_WAL_STORAGE_POLICY = DEFER_TO_HDFS_STORAGE_POLICY;
 
   /** Region in Transition metrics threshold time */
   public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index a862c8c..9017231 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -41,12 +41,14 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -348,10 +350,12 @@ public class TestFSUtils {
     verifyFileInDirWithStoragePolicy("ALL_SSD");
   }
 
+  final String INVALID_STORAGE_POLICY = "1772";
+
   /* should log a warning, but still work. (different warning on Hadoop < 2.6.0) */
   @Test
   public void testSetStoragePolicyInvalid() throws Exception {
-    verifyFileInDirWithStoragePolicy("1772");
+    verifyFileInDirWithStoragePolicy(INVALID_STORAGE_POLICY);
   }
 
   // Here instead of TestCommonFSUtils because we need a minicluster
@@ -372,6 +376,18 @@ public class TestFSUtils {
       String file = UUID.randomUUID().toString();
       Path p = new Path(testDir, file);
       WriteDataToHDFS(fs, p, 4096);
+      HFileSystem hfs = new HFileSystem(fs);
+      String policySet = hfs.getStoragePolicyName(p);
+      LOG.debug("The storage policy of path " + p + " is " + policySet);
+      if (policy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)
+          || policy.equals(INVALID_STORAGE_POLICY)) {
+        String hdfsDefaultPolicy = hfs.getStoragePolicyName(hfs.getHomeDirectory());
+        LOG.debug("The default hdfs storage policy (indicated by home path: "
+            + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy);
+        Assert.assertEquals(hdfsDefaultPolicy, policySet);
+      } else {
+        Assert.assertEquals(policy, policySet);
+      }
       // will assert existence before deleting.
       cleanupFile(fs, testDir);
     } finally {
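For reference, here is a minimal sketch of the deferral behavior the new default relies on. It is not the actual FSUtils implementation; the class and method names (WalStoragePolicyExample, applyWalStoragePolicy) are hypothetical. The idea is that callers compare the configured policy against the "NONE" sentinel and skip setStoragePolicy entirely, leaving the directory on whatever policy HDFS assigns by default.

// Hypothetical sketch of the "defer to HDFS" check described above; the real
// logic lives in HBase's FSUtils/CommonFSUtils, not in this example class.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WalStoragePolicyExample {
  static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
  static final String DEFER_TO_HDFS_STORAGE_POLICY = "NONE";

  static void applyWalStoragePolicy(Configuration conf, FileSystem fs, Path walDir)
      throws IOException {
    String policy = conf.get(WAL_STORAGE_POLICY, DEFER_TO_HDFS_STORAGE_POLICY)
        .toUpperCase();
    if (DEFER_TO_HDFS_STORAGE_POLICY.equals(policy)) {
      // "NONE" is deliberately not a real HDFS policy: do nothing, so the WAL
      // directory keeps whatever policy HDFS itself applies by default.
      return;
    }
    // An explicit, valid policy (e.g. ONE_SSD) is pushed down to HDFS.
    // FileSystem#setStoragePolicy(Path, String) exists in Hadoop 2.8+;
    // older Hadoop versions need reflection, which this sketch omits.
    fs.setStoragePolicy(walDir, policy);
  }
}

With the default set to this sentinel rather than "HOT", a user who explicitly configures hbase.wal.storage.policy to HOT (or any other valid value) is no longer silently skipped just because the value happens to match the default.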