From 11cfd5c603fdecbfff81ec5f8299dc3f17467017 Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Mon, 20 Apr 2015 00:31:57 -0500 Subject: [PATCH] HBASE-13498 Add more docs and a basic check for storage policy handling. --- .../java/org/apache/hadoop/hbase/HConstants.java | 6 +-- .../hadoop/hbase/regionserver/wal/FSHLog.java | 2 + .../java/org/apache/hadoop/hbase/util/FSUtils.java | 40 +++++++++++++----- .../org/apache/hadoop/hbase/util/TestFSUtils.java | 44 +++++++++++++++++++- 4 files changed, 77 insertions(+), 15 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index fc65c47..0b755d7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -957,9 +957,9 @@ public final class HConstants { /** Configuration name of WAL storage policy * Valid values are: - * NONE: no preference in destination of replicas - * ONE_SSD: place only one replica in SSD and the remaining in default storage - * and ALL_SSD: place all replica on SSD + * NONE: no preference in destination of block replicas + * ONE_SSD: place only one block replica in SSD and the remaining in default storage + * and ALL_SSD: place all block replicas on SSD * * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/ public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 443134d..60e36f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -491,6 +491,8 @@ public class FSHLog implements WAL { throw new IllegalArgumentException("wal suffix 
must start with '" + WAL_FILE_NAME_DELIMITER + "' but instead was '" + suffix + "'"); } + // Now that it exists, set the storage policy for the entire directory of wal files related to + // this FSHLog instance FSUtils.setStoragePolicy(fs, conf, this.fullPathLogDir, HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); this.logFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index e86054b..6a9107d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -103,20 +103,34 @@ public abstract class FSUtils { super(); } - /* - * Sets storage policy for given path according to config setting - * @param fs - * @param conf + /** + * Sets storage policy for given path according to config setting. + * If the passed path is a directory, we'll set the storage policy for all files + * created in the future in said directory. Note that this change in storage + * policy takes place at the HDFS level; it will persist beyond this RS's lifecycle. + * If we're running on a version of HDFS that doesn't support the given storage policy + * (or storage policies at all), then we'll issue a log message and continue. + * + * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html + * + * @param fs We only do anything if an instance of DistributedFileSystem + * @param conf used to look up storage policy with given key; not modified. * @param path the Path whose storage policy is to be set - * @param policyKey - * @param defaultPolicy + * @param policyKey e.g. 
HConstants.WAL_STORAGE_POLICY + * @param defaultPolicy usually should be the policy NONE to delegate to HDFS */ public static void setStoragePolicy(final FileSystem fs, final Configuration conf, final Path path, final String policyKey, final String defaultPolicy) { String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(); - if (!storagePolicy.equals(defaultPolicy) && - fs instanceof DistributedFileSystem) { + if (storagePolicy.equals(defaultPolicy)) { + if (LOG.isTraceEnabled()) { + LOG.trace("default policy of " + defaultPolicy + " requested, exiting early."); + } + return; + } + if (fs instanceof DistributedFileSystem) { DistributedFileSystem dfs = (DistributedFileSystem)fs; + // Once our minimum supported Hadoop version is 2.6.0 we can remove reflection. Class dfsClass = dfs.getClass(); Method m = null; try { @@ -125,10 +139,10 @@ public abstract class FSUtils { m.setAccessible(true); } catch (NoSuchMethodException e) { LOG.info("FileSystem doesn't support" - + " setStoragePolicy; --HDFS-7228 not available"); + + " setStoragePolicy; --HDFS-6584 not available"); } catch (SecurityException e) { LOG.info("Doesn't have access to setStoragePolicy on " - + "FileSystems --HDFS-7228 not available", e); + + "FileSystems --HDFS-6584 not available", e); m = null; // could happen on setAccessible() } if (m != null) { @@ -136,9 +150,13 @@ public abstract class FSUtils { m.invoke(dfs, path, storagePolicy); LOG.info("set " + storagePolicy + " for " + path); } catch (Exception e) { + // check for lack of HDFS-7228 LOG.warn("Unable to set " + storagePolicy + " for " + path, e); } } + } else { + LOG.info("FileSystem isn't an instance of DistributedFileSystem; presuming it doesn't " + + "support setStoragePolicy."); } } @@ -2060,4 +2078,4 @@ public abstract class FSUtils { return null; } } -} \ No newline at end of file +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index e2c1488..5cad9a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -345,6 +345,48 @@ public class TestFSUtils { } } + private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception { + HBaseTestingUtility htu = new HBaseTestingUtility(); + Configuration conf = htu.getConfiguration(); + conf.set(HConstants.WAL_STORAGE_POLICY, policy); + + MiniDFSCluster cluster = htu.startMiniDFSCluster(1); + try { + assertTrue(FSUtils.isHDFS(conf)); + + FileSystem fs = FileSystem.get(conf); + Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile"); + + FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY, + HConstants.DEFAULT_WAL_STORAGE_POLICY); + + String file = UUID.randomUUID().toString(); + Path p = new Path(testDir, file); + WriteDataToHDFS(fs, p, 4096); + // will assert existence before deleting. + cleanupFile(fs, testDir); + } finally { + cluster.shutdown(); + } + } + + @Test + public void testSetStoragePolicyDefault() throws Exception { + verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY); + } + + /* might log a warning, but still work. (always warning on Hadoop < 2.6.0) */ + @Test + public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception { + verifyFileInDirWithStoragePolicy("ALL_SSD"); + } + + /* should log a warning, but still work. (different warning on Hadoop < 2.6.0) */ + @Test + public void testSetStoragePolicyInvalid() throws Exception { + verifyFileInDirWithStoragePolicy("1772"); + } + /** * Ugly test that ensures we can get at the hedged read counters in dfsclient. * Does a bit of preading with hedged reads enabled using code taken from hdfs TestPread. 
@@ -493,4 +535,4 @@ public class TestFSUtils { assertTrue(fileSys.delete(name, true)); assertTrue(!fileSys.exists(name)); } -} \ No newline at end of file +} -- 1.7.10.2 (Apple Git-33)