From 6a1846a6f3fe2b08cf515a3f3ae6f1c9f2123631 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 25 Jan 2017 14:22:49 -0800 Subject: [PATCH 2/2] HBASE-17538 HFS.setStoragePolicy() logs errors on local fs Policy is set in a number of places each with its own 'implementation'. A setStoragePolicy was added by: commit 629b04f44f19b9589c9bcfb84da0cf5e0d4d1f18 Author: Yu Li Date: Fri Jan 6 18:35:38 2017 +0800 HBASE-15172 Support setting storage policy in bulkload for hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java And in *FileSystem files and StoreFileWriter by commit f92a14ade635e4b081f3938620979b5864ac261f Author: Yu Li Date: Mon Jan 9 09:52:58 2017 +0800 HBASE-14061 Support CF-level Storage Policy This patch makes all instances call the FSUtils#setStoragePolicy added here: commit eafc07a06d03c00e17bd476fa2b84ba7c8924b1e Author: tedyu Date: Thu Jan 15 08:52:30 2015 -0800 HBASE-12848 Utilize Flash storage for WAL It seems well-behaved enough. It wants a 'default' that will probably never be used. Using 'HOT', which seems to be what we have as the default elsewhere in the codebase. 
--- .../org/apache/hadoop/hbase/fs/HFileSystem.java | 8 +++----- .../hadoop/hbase/mapreduce/HFileOutputFormat2.java | 18 ++++++------------ .../hbase/regionserver/HRegionFileSystem.java | 13 +------------ .../hadoop/hbase/regionserver/StoreFileWriter.java | 9 ++------- .../java/org/apache/hadoop/hbase/util/FSUtils.java | 22 ++++++++++++++++++++++ 5 files changed, 34 insertions(+), 36 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index 904b8a4..36fa015 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hdfs.DFSClient; @@ -157,11 +158,8 @@ public class HFileSystem extends FilterFileSystem { * @param policyName The name of the storage policy. */ public void setStoragePolicy(Path path, String policyName) { - try { - ReflectionUtils.invokeMethod(this.fs, "setStoragePolicy", path, policyName); - } catch (Exception e) { - LOG.warn("Failed to set storage policy of [" + path + "] to [" + policyName + "]", e); - } + // Default 'HOT' storage! 
+ FSUtils.setStoragePolicy(this.fs, this.fs.getConf(), path, policyName); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 6987bf7..04b0655 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -32,8 +32,6 @@ import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -47,28 +45,30 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; @@ -401,13 +401,7 @@ public class HFileOutputFormat2 conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(family), conf.get(STORAGE_POLICY_PROPERTY)); if (null != policy && !policy.trim().isEmpty()) { - try { - if (fs instanceof DistributedFileSystem) { - ((DistributedFileSystem) fs).setStoragePolicy(cfPath, policy.trim()); - } - } catch (Throwable e) { - LOG.warn("failed to set block storage policy of [" + cfPath + "] to [" + policy + "]", e); - } + FSUtils.setStoragePolicy(fs, conf, cfPath, policy.trim()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index e3d39ff..9d421df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -38,7 +36,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import 
org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -56,7 +53,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSHDFSUtils; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import com.google.common.collect.Lists; @@ -193,14 +189,7 @@ public class HRegionFileSystem { * @param policyName The name of the storage policy. */ public void setStoragePolicy(String familyName, String policyName) { - Path storeDir = getStoreDir(familyName); - try { - ReflectionUtils.invokeMethod(this.fs, "setStoragePolicy", storeDir, policyName); - } catch (Exception e) { - if (!(this.fs instanceof LocalFileSystem)) { - LOG.warn("Failed to set storage policy of [" + storeDir + "] to [" + policyName + "]", e); - } - } + FSUtils.setStoragePolicy(this.fs, this.fs.getConf(), getStoreDir(familyName), policyName); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index 786d58a..18fa532 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import 
org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -41,6 +40,7 @@ import org.apache.hadoop.hbase.util.BloomContext; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.RowBloomContext; import org.apache.hadoop.hbase.util.RowColBloomContext; @@ -480,12 +480,7 @@ public class StoreFileWriter implements CellSink, ShipperListener { if (LOG.isTraceEnabled()) { LOG.trace("set block storage policy of [" + dir + "] to [" + policyName + "]"); } - - try { - ReflectionUtils.invokeMethod(this.fs, "setStoragePolicy", dir, policyName.trim()); - } catch (Exception e) { - LOG.warn("Failed to set storage policy of [" + dir + "] to [" + policyName + "]", e); - } + FSUtils.setStoragePolicy(this.fs, this.conf, dir, policyName.trim()); } if (filePath == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index abddd78..87c5d00 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -132,6 +132,28 @@ public abstract class FSUtils { * @param conf used to look up storage policy with given key; not modified. * @param path the Path whose storage policy is to be set * @param policyKey e.g. HConstants.WAL_STORAGE_POLICY + */ + public static void setStoragePolicy(final FileSystem fs, final Configuration conf, + final Path path, final String policyKey) { + // Default 'HOT' storage policy. + setStoragePolicy(fs, conf, path, policyKey, + org.apache.hadoop.hdfs.protocol.HdfsConstants.HOT_STORAGE_POLICY_NAME); + } + + /** + * Sets storage policy for given path according to config setting. 
+ * If the passed path is a directory, we'll set the storage policy for all files + * created in the future in said directory. Note that this change in storage + * policy takes place at the HDFS level; it will persist beyond this RS's lifecycle. + * If we're running on a version of HDFS that doesn't support the given storage policy + * (or storage policies at all), then we'll issue a log message and continue. + * + * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html + * + * @param fs We only do anything if an instance of DistributedFileSystem + * @param conf used to look up storage policy with given key; not modified. + * @param path the Path whose storage policy is to be set + * @param policyKey e.g. HConstants.WAL_STORAGE_POLICY * @param defaultPolicy usually should be the policy NONE to delegate to HDFS */ public static void setStoragePolicy(final FileSystem fs, final Configuration conf, -- 2.8.4 (Apple Git-73)