commit cf1eed9d3bfc6707d657ca22468eb4912898a421
Author: Yu Li
Date:   Thu Jun 7 14:12:55 2018 +0800

    HBASE-20691 Change the default WAL storage policy back to "NONE"

    This reverts commit 564c193d61cd1f92688a08a3af6d55ce4c4636d8 and adds more
    documentation about why we choose "NONE" as the default.

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 9241682..c16fdb5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1072,7 +1072,14 @@ public final class HConstants {
    * Valid values are: HOT, COLD, WARM, ALL_SSD, ONE_SSD, LAZY_PERSIST
    * See http://hadoop.apache.org/docs/r2.7.3/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html */
   public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
-  public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";
+  /** "NONE" is not a valid storage policy and means we defer the policy to HDFS */
+  public static final String DEFER_TO_HDFS_STORAGE_POLICY = "NONE";
+  /**
+   * In our current implementation we will bypass the user setting if it is the same as the
+   * default policy, so we intentionally choose an invalid one (defer to HDFS by default).
+   * @see HBASE-20691
+   */
+  public static final String DEFAULT_WAL_STORAGE_POLICY = DEFER_TO_HDFS_STORAGE_POLICY;

   /** Region in Transition metrics threshold time */
   public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index 5b46de9..82f986f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -25,7 +25,6 @@ import java.lang.reflect.Method;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;

@@ -454,36 +453,6 @@ public abstract class CommonFSUtils {
       new Path(namespace)));
   }

-  /**
-   * Sets storage policy for given path according to config setting.
-   * If the passed path is a directory, we'll set the storage policy for all files
-   * created in the future in said directory. Note that this change in storage
-   * policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle.
-   * If we're running on a FileSystem implementation that doesn't support the given storage policy
-   * (or storage policies at all), then we'll issue a log message and continue.
-   *
-   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
-   *
-   * @param fs We only do anything if it implements a setStoragePolicy method
-   * @param conf used to look up storage policy with given key; not modified.
-   * @param path the Path whose storage policy is to be set
-   * @param policyKey Key to use pulling a policy from Configuration:
-   *   e.g. HConstants.WAL_STORAGE_POLICY (hbase.wal.storage.policy).
-   * @param defaultPolicy if the configured policy is equal to this policy name, we will skip
-   *   telling the FileSystem to set a storage policy.
-   */
-  public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
-      final Path path, final String policyKey, final String defaultPolicy) {
-    String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(Locale.ROOT);
-    if (storagePolicy.equals(defaultPolicy)) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
-      }
-      return;
-    }
-    setStoragePolicy(fs, path, storagePolicy);
-  }
-
   // this mapping means that under a federated FileSystem implementation, we'll
   // only log the first failure from any of the underlying FileSystems at WARN and all others
   // will be at DEBUG.
@@ -514,6 +483,12 @@ public abstract class CommonFSUtils {
       }
       return;
     }
+    if (storagePolicy.equals(HConstants.DEFAULT_WAL_STORAGE_POLICY)) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("default policy of " + storagePolicy + " requested, exiting early.");
+      }
+      return;
+    }
     final String trimmedStoragePolicy = storagePolicy.trim();
     if (trimmedStoragePolicy.isEmpty()) {
       if (LOG.isTraceEnabled()) {
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index f2931fc..59c9f67 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -216,8 +216,9 @@ public class WALProcedureStore extends ProcedureStoreBase {
       }
     }
     // Now that it exists, set the log policy
-    CommonFSUtils.setStoragePolicy(fs, conf, walDir, HConstants.WAL_STORAGE_POLICY,
-      HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    String storagePolicy =
+        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    CommonFSUtils.setStoragePolicy(fs, walDir, storagePolicy);

     // Create archive dir up front. Rename won't work w/o it up on HDFS.
     if (this.walArchiveDir != null && !this.fs.exists(this.walArchiveDir)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 825ad17..9b31834 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -363,8 +363,9 @@ public abstract class AbstractFSWAL implements WAL {
     }
     // Now that it exists, set the storage policy for the entire directory of wal files related to
     // this FSHLog instance
-    CommonFSUtils.setStoragePolicy(fs, conf, this.walDir, HConstants.WAL_STORAGE_POLICY,
-      HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    String storagePolicy =
+        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    CommonFSUtils.setStoragePolicy(fs, this.walDir, storagePolicy);
     this.walFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8");
"" : URLEncoder.encode(suffix, "UTF8"); this.prefixPathStr = new Path(walDir, walFilePrefix + WAL_FILE_NAME_DELIMITER).toString(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index a862c8c..b3b7ed7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -41,12 +41,15 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSHedgedReadMetrics; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -339,19 +342,40 @@ public class TestFSUtils { @Test public void testSetStoragePolicyDefault() throws Exception { + verifyNoHDFSApiInvocationForDefaultPolicy(); verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY); } + private void verifyNoHDFSApiInvocationForDefaultPolicy() { + FileSystem testFs = new AlwaysFailSetStoragePolicyFileSystem(); + // we should see no warning log, which indicates that the HDFS API is not invoked + LOG.debug("Before set storage policy to NONE"); + FSUtils.setStoragePolicy(testFs, new Path("non-exist"), HConstants.DEFAULT_WAL_STORAGE_POLICY); + LOG.debug("After set storage policy to NONE"); + // warning log is expected when passing some valid policy + FSUtils.setStoragePolicy(testFs, new Path("non-exist"), "HOT"); + } + + class AlwaysFailSetStoragePolicyFileSystem extends DistributedFileSystem { + @Override + public void setStoragePolicy(final Path src, final String policyName) + throws IOException { + throw new IOException("The setStoragePolicy method is invoked unexpectedly"); + } + } + /* might log a warning, but still work. (always warning on Hadoop < 2.6.0) */ @Test public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception { verifyFileInDirWithStoragePolicy("ALL_SSD"); } + final String INVALID_STORAGE_POLICY = "1772"; + /* should log a warning, but still work. 
   @Test
   public void testSetStoragePolicyInvalid() throws Exception {
-    verifyFileInDirWithStoragePolicy("1772");
+    verifyFileInDirWithStoragePolicy(INVALID_STORAGE_POLICY);
   }

   // Here instead of TestCommonFSUtils because we need a minicluster
@@ -366,12 +390,25 @@ public class TestFSUtils {
       Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
       fs.mkdirs(testDir);

-      FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
-        HConstants.DEFAULT_WAL_STORAGE_POLICY);
+      String storagePolicy =
+          conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+      FSUtils.setStoragePolicy(fs, testDir, storagePolicy);

       String file = UUID.randomUUID().toString();
       Path p = new Path(testDir, file);
       WriteDataToHDFS(fs, p, 4096);
+      HFileSystem hfs = new HFileSystem(fs);
+      String policySet = hfs.getStoragePolicyName(p);
+      LOG.debug("The storage policy of path " + p + " is " + policySet);
+      if (policy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)
+          || policy.equals(INVALID_STORAGE_POLICY)) {
+        String hdfsDefaultPolicy = hfs.getStoragePolicyName(hfs.getHomeDirectory());
+        LOG.debug("The default hdfs storage policy (indicated by home path: "
+            + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy);
+        Assert.assertEquals(hdfsDefaultPolicy, policySet);
+      } else {
+        Assert.assertEquals(policy, policySet);
+      }
       // will assert existence before deleting.
       cleanupFile(fs, testDir);
     } finally {
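
A note for later readers, since the rationale is easy to miss in the hunks above: with "NONE" as the default, HBase never calls the HDFS storage-policy API unless the operator explicitly configures a real policy. The following is a minimal, self-contained sketch, not part of the patch; the class name, the /hbase/WALs path, and the stderr error handling are illustrative assumptions. It mirrors the flow the patch establishes: resolve hbase.wal.storage.policy from the Configuration, short-circuit on the "NONE" sentinel, and only then touch the FileSystem.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Minimal sketch of the policy-resolution flow introduced by this patch. */
public class WalStoragePolicySketch {

  // mirrors HConstants.WAL_STORAGE_POLICY and HConstants.DEFER_TO_HDFS_STORAGE_POLICY
  static final String WAL_STORAGE_POLICY_KEY = "hbase.wal.storage.policy";
  static final String DEFER_TO_HDFS = "NONE";

  /** Simplified stand-in for CommonFSUtils.setStoragePolicy(fs, path, policy). */
  static void setStoragePolicy(FileSystem fs, Path dir, String policy) {
    if (policy == null || policy.trim().isEmpty() || policy.equals(DEFER_TO_HDFS)) {
      // "NONE" is not a valid HDFS policy: skip the API call entirely so the
      // directory keeps whatever default the HDFS administrator configured.
      return;
    }
    try {
      // FileSystem#setStoragePolicy exists since Hadoop 2.8; the real
      // CommonFSUtils reaches it reflectively to stay compatible with older Hadoop.
      fs.setStoragePolicy(dir, policy.trim());
    } catch (Exception e) {
      // HBase logs a warning and continues rather than failing WAL creation.
      System.err.println("Unable to set storage policy " + policy + ": " + e);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Operators opt in explicitly, e.g. in hbase-site.xml:
    //   <property>
    //     <name>hbase.wal.storage.policy</name>
    //     <value>ONE_SSD</value>
    //   </property>
    String policy = conf.get(WAL_STORAGE_POLICY_KEY, DEFER_TO_HDFS);
    FileSystem fs = FileSystem.get(conf);
    setStoragePolicy(fs, new Path("/hbase/WALs"), policy);
  }
}

Why the sentinel must be invalid: the equality check against the default skips the HDFS call, so with the old "HOT" default an explicit user request for HOT would also have been silently skipped. Choosing a deliberately invalid name ("NONE") removes that ambiguity, which is what the new Javadoc on DEFAULT_WAL_STORAGE_POLICY documents.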