diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 5335bef..20ee707 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -71,6 +71,10 @@ public class HColumnDescriptor implements Comparable { "ENCODE_ON_DISK"; public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING"; + + // storage policy for destination of flush + public static final String STORAGE_POLICY_FOR_FLUSH = "STORAGE_POLICY_FOR_FLUSH"; + public static final String DEFAULT_STORAGE_POLICY_FOR_FLUSH = "NONE"; /** * Key for the BLOCKCACHE attribute. * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we @@ -914,6 +918,23 @@ public class HColumnDescriptor implements Comparable { } /** + * @param policy the storage policy for destination of flush + * @return this (for chained invocation) + */ + public HColumnDescriptor setStoragePolicyForFlush(String policy) { + return setValue(STORAGE_POLICY_FOR_FLUSH, policy); + } + + /** + * @return storage policy + */ + public String getStoragePolicyForFlush() { + String value = getValue(STORAGE_POLICY_FOR_FLUSH); + if (value != null) return value; + return DEFAULT_STORAGE_POLICY_FOR_FLUSH; + } + + /** * @return true if we should cache data blocks on write * @deprecated Use {@link #isCacheDataOnWrite()} instead */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 047d689..edda38c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -87,6 +87,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import 
org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.ipc.RemoteException; @@ -214,7 +215,9 @@ public class HStore implements Store { this.fs = region.getRegionFileSystem(); // Assemble the store's home directory and Ensure it exists. - fs.createStoreDir(family.getNameAsString()); + Path storeDir = fs.createStoreDir(family.getNameAsString()); + FSUtils.setStoragePolicy(fs.getFileSystem(), storeDir, family.getStoragePolicyForFlush(), + HColumnDescriptor.DEFAULT_STORAGE_POLICY_FOR_FLUSH); this.region = region; this.family = family; // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor @@ -963,9 +966,13 @@ public class HStore implements Store { } HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, cryptoContext); + Path p = fs.createTempName(); + fs.createDir(p); + FSUtils.setStoragePolicy(fs.getFileSystem(), p, family.getStoragePolicyForFlush(), + HColumnDescriptor.DEFAULT_STORAGE_POLICY_FOR_FLUSH); StoreFile.Writer w = new StoreFile.WriterBuilder(conf, writerCacheConf, this.getFileSystem()) - .withFilePath(fs.createTempName()) + .withFilePath(p) .withComparator(comparator) .withBloomType(family.getBloomFilterType()) .withMaxKeyCount(maxKeyCount) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 19b4719..ad3d784 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -491,7 +491,8 @@ public class FSHLog implements WAL { throw new IllegalArgumentException("wal suffix must start with '" + WAL_FILE_NAME_DELIMITER + "' but instead was '" + suffix + 
"'"); } - FSUtils.setStoragePolicy(fs, conf, this.fullPathLogDir, HConstants.WAL_STORAGE_POLICY, + FSUtils.setStoragePolicy(fs, this.fullPathLogDir, + conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY), HConstants.DEFAULT_WAL_STORAGE_POLICY); this.logFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8"); this.prefixPathStr = new Path(fullPathLogDir, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index f5b0269..b54259f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -105,14 +105,13 @@ public abstract class FSUtils { /* - * Sets storage policy for given path according to config setting + * Sets the given storage policy for the given path * @param fs - * @param conf * @param path the Path whose storage policy is to be set - * @param policyKey + * @param policy the storage policy to apply; no-op when it equals defaultPolicy * @param defaultPolicy */ - public static void setStoragePolicy(final FileSystem fs, final Configuration conf, - final Path path, final String policyKey, final String defaultPolicy) { - String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(); + public static void setStoragePolicy(final FileSystem fs, + final Path path, final String policy, final String defaultPolicy) { + String storagePolicy = policy.toUpperCase(); if (!storagePolicy.equals(defaultPolicy) && fs instanceof DistributedFileSystem) { DistributedFileSystem dfs = (DistributedFileSystem)fs;