diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index d6bdec0..d9c9a16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -135,6 +135,16 @@ public class CacheConfig {
private static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true;
/**
+ * Configuration key for multiple cache instances
+ */
+ public static final String REGION_SERVER_HASH_KEY = "hbase.blockcache.regionserver.hash";
+
+ /**
+ * Configuration default value for multiple cache instances
+ */
+ public static final String REGION_SERVER_HASH_DEFAULT = "NONE";
+
+ /**
* Enum of all built in external block caches.
* This is used for config.
*/
@@ -210,50 +220,76 @@ public class CacheConfig {
* family descriptor.
* @param conf hbase configuration
* @param family column family configuration
+ * @deprecated For tests only; to be removed.
+ * Use {@link #CacheConfig(Configuration, HColumnDescriptor, BlockCache)}
*/
+ @Deprecated
public CacheConfig(Configuration conf, HColumnDescriptor family) {
- this(CacheConfig.instantiateBlockCache(conf),
- family.isBlockCacheEnabled(),
- family.isInMemory(),
- // For the following flags we enable them regardless of per-schema settings
- // if they are enabled in the global configuration.
- conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY,
- DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(),
- conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
- DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(),
- conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
- DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(),
- conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY,
- DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
- conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
- conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
- DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
- conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
- HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
- conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY,DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
- );
+ this(conf, family, CacheConfig.instantiateBlockCache(conf));
+ }
+
+ /**
+ * Create a cache configuration using the specified configuration object and
+ * family descriptor.
+ * @param conf hbase configuration
+ * @param family column family configuration
+ * @param blockCache block cache instance
+ */
+ public CacheConfig(Configuration conf, HColumnDescriptor family, BlockCache blockCache) {
+ this(blockCache,
+ family.isBlockCacheEnabled(),
+ family.isInMemory(),
+ // For the following flags we enable them regardless of per-schema settings
+ // if they are enabled in the global configuration.
+ conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY,
+ DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(),
+ conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
+ DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(),
+ conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
+ DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(),
+ conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY,
+ DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
+ conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
+ conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
+ DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
+ conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
+ HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
+ conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY,DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
+ );
}
/**
* Create a cache configuration using the specified configuration object and
* defaults for family level settings.
* @param conf hbase configuration
+ * @deprecated For tests only; to be removed. Use {@link #CacheConfig(Configuration, BlockCache)}
*/
+ @Deprecated
public CacheConfig(Configuration conf) {
- this(CacheConfig.instantiateBlockCache(conf),
- DEFAULT_CACHE_DATA_ON_READ,
- DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set
- // strictly from conf
- conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
- conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE),
- conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE),
- conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
- conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
- conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN),
- conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
- HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1),
- conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY,DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
- );
+ this(conf, CacheConfig.instantiateBlockCache(conf));
+ }
+
+ /**
+ * Create a cache configuration using the specified configuration object and
+ * defaults for family level settings.
+ * @param conf hbase configuration
+ * @param blockCache block cache instance
+ */
+ public CacheConfig(Configuration conf, BlockCache blockCache) {
+ this(blockCache,
+ DEFAULT_CACHE_DATA_ON_READ,
+ DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set
+ // strictly from conf
+ conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
+ conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE),
+ conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE),
+ conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
+ conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
+ conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN),
+ conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
+ HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1),
+ conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY,DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
+ );
}
/**
@@ -506,25 +542,10 @@ public class CacheConfig {
", prefetchOnOpen=" + shouldPrefetchOnOpen();
}
- // Static block cache reference and methods
-
- /**
- * Static reference to the block cache, or null if no caching should be used
- * at all.
- */
- // Clear this if in tests you'd make more than one block cache instance.
- @VisibleForTesting
- static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE;
-
- /** Boolean whether we have disabled the block cache entirely. */
- @VisibleForTesting
- static boolean blockCacheDisabled = false;
-
static long getLruCacheSize(final Configuration conf, final MemoryUsage mu) {
float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
if (cachePercentage <= 0.0001f) {
- blockCacheDisabled = true;
return -1;
}
if (cachePercentage > 1.0) {
@@ -645,40 +666,40 @@
/**
* Returns the block cache or null in case none should be used.
- * Sets GLOBAL_BLOCK_CACHE_INSTANCE
+ * The static BlockCache instance has been removed, so this method always creates a new BlockCache.
*
* @param conf The current configuration.
* @return The block cache or null.
*/
- public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
- if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
- if (blockCacheDisabled) return null;
+ public static BlockCache instantiateBlockCache(Configuration conf) {
MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
LruBlockCache l1 = getL1(conf, mu);
- // blockCacheDisabled is set as a side-effect of getL1(), so check it again after the call.
- if (blockCacheDisabled) return null;
BlockCache l2 = getL2(conf, mu);
if (l2 == null) {
- GLOBAL_BLOCK_CACHE_INSTANCE = l1;
+ return l1;
} else {
boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
DEFAULT_BUCKET_CACHE_COMBINED);
+ BlockCache blockCache;
if (useExternal) {
- GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2);
+ blockCache = new InclusiveCombinedBlockCache(l1, l2);
} else {
if (combinedWithLru) {
- GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2);
+ blockCache = new CombinedBlockCache(l1, l2);
} else {
// L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler
// mechanism. It is a little ugly but works according to the following: when the
// background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get
// a block from the L1 cache, if not in L1, we will search L2.
- GLOBAL_BLOCK_CACHE_INSTANCE = l1;
+ blockCache = l1;
}
}
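+ // If we got this far an L2 exists, so getL1() should have produced a cache to front
+ // it; l1 is null only when the block cache has been disabled entirely.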
+ assert l1 != null;
l1.setVictimCache(l2);
+ return blockCache;
}
- return GLOBAL_BLOCK_CACHE_INSTANCE;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 35b232a..f4528bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -333,9 +333,8 @@ public class HFile {
*/
public static final WriterFactory getWriterFactoryNoCache(Configuration
conf) {
- Configuration tempConf = new Configuration(conf);
- tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
- return HFile.getWriterFactory(conf, new CacheConfig(tempConf));
+ // disable block cache
+ return HFile.getWriterFactory(conf, new CacheConfig(conf, (BlockCache)null));
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 3b041f0..30b742e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -57,11 +57,12 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -293,8 +294,6 @@ public class HFileOutputFormat2
DataBlockEncoding encoding = overriddenEncoding;
encoding = encoding == null ? datablockEncodingMap.get(family) : encoding;
encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
- Configuration tempConf = new Configuration(conf);
- tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
HFileContextBuilder contextBuilder = new HFileContextBuilder()
.withCompression(compression)
.withChecksumType(HStore.getChecksumType(conf))
@@ -305,12 +304,13 @@
if (null == favoredNodes) {
wl.writer =
- new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
+ new StoreFile.WriterBuilder(conf, new CacheConfig(conf, (BlockCache)null), fs)
.withOutputDir(familydir).withBloomType(bloomType)
.withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext).build();
} else {
wl.writer =
- new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
+ new StoreFile.WriterBuilder(conf, new CacheConfig(conf, (BlockCache)null),
+ new HFileSystem(fs))
.withOutputDir(familydir).withBloomType(bloomType)
.withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext)
.withFavoredNodes(favoredNodes).build();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 7a59ea1..341d2b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -56,11 +56,12 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
@@ -628,7 +629,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
throws IOException {
final Path hfilePath = item.hfilePath;
HFile.Reader hfr = HFile.createReader(fs, hfilePath,
- new CacheConfig(getConf()), getConf());
+ new CacheConfig(getConf(), (BlockCache)null), getConf());
final byte[] first, last;
try {
hfr.loadFileInfo();
@@ -828,7 +829,7 @@
HColumnDescriptor familyDescriptor)
throws IOException {
FileSystem fs = inFile.getFileSystem(conf);
- CacheConfig cacheConf = new CacheConfig(conf);
+ CacheConfig cacheConf = new CacheConfig(conf, (BlockCache)null);
HalfStoreFileReader halfReader = null;
StoreFile.Writer halfWriter = null;
try {
@@ -927,7 +928,7 @@
throws IOException {
Path hfile = hfileStatus.getPath();
HFile.Reader reader = HFile.createReader(fs, hfile,
- new CacheConfig(getConf()), getConf());
+ new CacheConfig(getConf(), (BlockCache)null), getConf());
try {
if (hcd.getCompressionType() != reader.getFileContext().getCompression()) {
hcd.setCompressionType(reader.getFileContext().getCompression());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index a43e6e7..a170784 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -67,9 +68,7 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
FileSystem fs = FileSystem.get(conf);
LOG.info("Cleaning the expired MOB files of " + family.getNameAsString() + " in " + tableName);
// disable the block cache.
- Configuration copyOfConf = new Configuration(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- CacheConfig cacheConfig = new CacheConfig(copyOfConf);
+ CacheConfig cacheConfig = new CacheConfig(conf, (BlockCache)null);
MobUtils.cleanExpiredMobFiles(fs, conf, tn, family, cacheConfig,
EnvironmentEdgeManager.currentTime());
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java
index 6c80355..6048802 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.mob;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
/**
@@ -28,17 +29,17 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
*/
@InterfaceAudience.Private
public class MobCacheConfig extends CacheConfig {
+ private final MobFileCache mobFileCache;
- private static MobFileCache mobFileCache;
-
- public MobCacheConfig(Configuration conf, HColumnDescriptor family) {
- super(conf, family);
- instantiateMobFileCache(conf);
+ public MobCacheConfig(Configuration conf, HColumnDescriptor family, BlockCache blockCache,
+ MobFileCache mobFileCache) {
+ super(conf, family, blockCache);
+ this.mobFileCache = mobFileCache;
}
- public MobCacheConfig(Configuration conf) {
- super(conf);
+ public MobCacheConfig(Configuration conf, BlockCache blockCache, MobFileCache mobFileCache) {
+ super(conf, blockCache);
- instantiateMobFileCache(conf);
+ this.mobFileCache = mobFileCache;
}
/**
@@ -47,10 +48,7 @@ public class MobCacheConfig extends CacheConfig {
- * @return The current instance of MobFileCache.
+ * @return A new instance of MobFileCache.
*/
public static synchronized MobFileCache instantiateMobFileCache(Configuration conf) {
- if (mobFileCache == null) {
- mobFileCache = new MobFileCache(conf);
- }
- return mobFileCache;
+ return new MobFileCache(conf);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 424a39b..41bcea3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
@@ -885,9 +886,7 @@ public class MobUtils {
public static void archiveMobStoreFiles(Configuration conf, FileSystem fs,
HRegionInfo mobRegionInfo, Path mobFamilyDir, byte[] family) throws IOException {
// disable the block cache.
- Configuration copyOfConf = HBaseConfiguration.create(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- CacheConfig cacheConfig = new CacheConfig(copyOfConf);
+ CacheConfig cacheConfig = new CacheConfig(conf, (BlockCache)null);
FileStatus[] fileStatus = FSUtils.listStatus(fs, mobFamilyDir);
List storeFileList = new ArrayList();
for (FileStatus file : fileStatus) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index dabedfd..e8a072b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.mob.MobConstants;
@@ -109,11 +110,10 @@ public class PartitionedMobCompactor extends MobCompactor {
tableName.getNamespaceAsString(), tableName.getQualifierAsString())));
compactionKVMax = this.conf.getInt(HConstants.COMPACTION_KV_MAX,
HConstants.COMPACTION_KV_MAX_DEFAULT);
- Configuration copyOfConf = new Configuration(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- compactionCacheConfig = new CacheConfig(copyOfConf);
+ // disable block cache
+ compactionCacheConfig = new CacheConfig(conf, (BlockCache)null);
tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, tableName.getName());
- cryptoContext = MobUtils.createEncryptionContext(copyOfConf, column);
+ cryptoContext = MobUtils.createEncryptionContext(conf, column);
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java
index 6d06dfc..7c06d33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepJob.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
@@ -107,9 +108,7 @@ public class SweepJob {
this.conf = conf;
this.fs = fs;
// disable the block cache.
- Configuration copyOfConf = new Configuration(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- cacheConfig = new CacheConfig(copyOfConf);
+ cacheConfig = new CacheConfig(conf, (BlockCache)null);
}
static ServerName getCurrentServerName(Configuration conf) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
index d39267b..c6ee1ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mob.MobConstants;
@@ -129,9 +130,7 @@ public class SweepReducer extends Reducer {
}
}
// disable the block cache.
- Configuration copyOfConf = new Configuration(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- this.cacheConfig = new CacheConfig(copyOfConf);
+ this.cacheConfig = new CacheConfig(conf, (BlockCache)null);
table = c.getBufferedMutator(new BufferedMutatorParams(tn).writeBufferSize(1*1024*1024));
memstore = new MemStoreWrapper(context, fs, table, family, new DefaultMemStore(), cacheConfig);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index faf6d81..ebb01cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -126,7 +126,7 @@ public class HMobStore extends HStore {
*/
@Override
protected void createCacheConf(HColumnDescriptor family) {
- cacheConf = new MobCacheConfig(conf, family);
+ cacheConf = new MobCacheConfig(conf, family, region.getBlockCache(), region.getMobFileCache());
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 459d56e..c721d71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -127,6 +127,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -359,6 +360,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
private volatile Optional configurationManager;
/**
+ * Block cache instance used to override the region server's block cache in tests.
+ */
+ private static BlockCache blockCache = null;
+
+ /**
+ * Mob file cache instance used to override the region server's mob file cache in tests.
+ */
+ private static MobFileCache mobFileCache = null;
+
+ /**
* @return The smallest mvcc readPoint across all the scanners in this
* region. Writes older than this readPoint, are included in every
* read operation.
@@ -1167,6 +1178,42 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
+ * Set the block cache instance of HRegion for testing.
+ * @param blockCache block cache instance
+ */
+ @VisibleForTesting
+ public static void setBlockCacheForTesting(BlockCache blockCache) {
+ HRegion.blockCache = blockCache;
+ }
+
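+ /**
+ * @return The block cache used by this region: the static test override when set,
+ * otherwise the block cache of the region server services, or null if there is neither.
+ */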
+ public BlockCache getBlockCache() {
+ if (HRegion.blockCache != null) return HRegion.blockCache;
+ return rsServices == null ? null : rsServices.getBlockCache();
+ }
+
+ /**
+ * Set the mob file cache instance of HRegion for testing.
+ * @param mobFileCache mob file cache instance
+ */
+ @VisibleForTesting
+ public static void setMobFileCacheForTesting(MobFileCache mobFileCache) {
+ HRegion.mobFileCache = mobFileCache;
+ }
+
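+ /**
+ * @return The mob file cache used by this region: the static test override when set,
+ * otherwise the mob file cache of the region server services, or null if there is neither.
+ */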
+ public MobFileCache getMobFileCache() {
+ if (HRegion.mobFileCache != null) return HRegion.mobFileCache;
+ return rsServices == null ? null : rsServices.getMobFileCache();
+ }
+
+ /**
* Reset recovering state of current region
*/
public void setRecovering(boolean newState) {
@@ -7976,9 +8023,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
processTable(fs, tableDir, wals, c, majorCompact);
} finally {
wals.close();
- // TODO: is this still right?
- BlockCache bc = new CacheConfig(c).getBlockCache();
- if (bc != null) bc.shutdown();
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2ce2193..ad91d75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.http.InfoServer;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcClient;
@@ -104,6 +105,7 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.mob.MobCacheConfig;
+import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
@@ -388,6 +390,10 @@ public class HRegionServer extends HasThread implements
private final RegionServerAccounting regionServerAccounting;
+ // Block cache instance
+ protected final BlockCache blockCache;
+ // Mob file cache instance
+ protected final MobFileCache mobFileCache;
// Cache configuration and block cache reference
protected CacheConfig cacheConfig;
// Cache configuration for mob
@@ -558,8 +564,12 @@
login(userProvider, hostName);
regionServerAccounting = new RegionServerAccounting();
- cacheConfig = new CacheConfig(conf);
- mobCacheConfig = new MobCacheConfig(conf);
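+ // Create the block cache and mob file cache once, up front, and share the instances
+ // with every CacheConfig this region server creates.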
+ blockCache = CacheConfig.instantiateBlockCache(conf);
+ cacheConfig = new CacheConfig(conf, blockCache);
+ mobFileCache = MobCacheConfig.instantiateMobFileCache(conf);
+ mobCacheConfig = new MobCacheConfig(conf, blockCache, mobFileCache);
uncaughtExceptionHandler = new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
@@ -1379,7 +1389,7 @@
// Save it in a file, this will allow to see if we crash
ZNodeClearer.writeMyEphemeralNodeOnDisk(getMyEphemeralNodePath());
- this.cacheConfig = new CacheConfig(conf);
+ this.cacheConfig = new CacheConfig(conf, blockCache);
this.walFactory = setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));
@@ -3301,6 +3311,13 @@
}
/**
+ * @return The mob cache config instance used by the regionserver.
+ */
+ public MobCacheConfig getMobCacheConfig() {
+ return this.mobCacheConfig;
+ }
+
+ /**
* @return : Returns the ConfigurationManager object for testing purposes.
*/
protected ConfigurationManager getConfigurationManager() {
@@ -3343,6 +3360,16 @@
return max;
}
+ @Override
+ public BlockCache getBlockCache() {
+ return blockCache;
+ }
+
+ @Override
+ public MobFileCache getMobFileCache() {
+ return mobFileCache;
+ }
+
/**
* For testing
* @return whether all wal roll request finished for this regionserver
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 133a5d9..854f6f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -336,7 +336,7 @@ public class HStore implements Store {
* @param family The current column family.
*/
protected void createCacheConf(final HColumnDescriptor family) {
- this.cacheConf = new CacheConfig(conf, family);
+ this.cacheConf = new CacheConfig(conf, family, region.getBlockCache());
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index cd4816c..4933587 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -30,8 +30,10 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
import org.apache.hadoop.hbase.wal.WAL;
@@ -231,4 +233,14 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
* @see org.apache.hadoop.hbase.regionserver.Store#getCompactionPressure()
*/
double getCompactionPressure();
+
+ /**
+ * @return block cache instance
+ */
+ BlockCache getBlockCache();
+
+ /**
+ * @return mob file cache instance
+ */
+ MobFileCache getMobFileCache();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
index ea43820..2215c0b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
@@ -35,12 +35,13 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.io.compress.Compressor;
/**
@@ -132,7 +133,8 @@ public class CompressionTest {
writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
writer.close();
Cell cc = null;
- HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
+ HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf, (BlockCache)null),
+ conf);
try {
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index fa138be..409a3c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -109,6 +109,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -778,7 +779,7 @@ public class HBaseFsck extends Configured implements Closeable {
// For all the stores in this column family.
for (FileStatus storeFile : storeFiles) {
HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig(
- getConf()), getConf());
+ getConf(), (BlockCache)null), getConf());
if ((reader.getFirstKey() != null)
&& ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
((KeyValue.KeyOnlyKeyValue) reader.getFirstKey()).getKey()) > 0))) {
@@ -881,7 +882,7 @@ public class HBaseFsck extends Configured implements Closeable {
byte[] start, end;
HFile.Reader hf = null;
try {
- CacheConfig cacheConf = new CacheConfig(getConf());
+ CacheConfig cacheConf = new CacheConfig(getConf(), (BlockCache)null);
hf = HFile.createReader(fs, hfile.getPath(), cacheConf, getConf());
hf.loadFileInfo();
Cell startKv = hf.getFirstKey();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index c26c696..15aed19 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -82,7 +83,7 @@ public class HFileCorruptionChecker {
boolean quarantine) throws IOException {
this.conf = conf;
this.fs = FileSystem.get(conf);
- this.cacheConf = new CacheConfig(conf);
+ this.cacheConf = new CacheConfig(conf, (BlockCache)null);
this.executor = executor;
this.inQuarantineMode = quarantine;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index c126b19..07985cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -33,9 +33,13 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
+import org.apache.hadoop.hbase.mob.MobCacheConfig;
+import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
@@ -64,6 +68,8 @@ public class MockRegionServerServices implements RegionServerServices {
new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR);
private HFileSystem hfs = null;
private final Configuration conf;
+ private final BlockCache blockCache;
+ private final MobFileCache mobFileCache;
private ZooKeeperWatcher zkw = null;
private ServerName serverName = null;
private RpcServerInterface rpcServer = null;
@@ -79,6 +85,8 @@ public class MockRegionServerServices implements RegionServerServices {
this.zkw = zkw;
this.serverName = serverName;
this.conf = (zkw == null ? new Configuration() : zkw.getConfiguration());
+ this.blockCache = CacheConfig.instantiateBlockCache(conf);
+ this.mobFileCache = MobCacheConfig.instantiateMobFileCache(conf);
}
MockRegionServerServices(){
@@ -87,6 +95,8 @@ public class MockRegionServerServices implements RegionServerServices {
public MockRegionServerServices(Configuration conf) {
this.conf = conf;
+ this.blockCache = null;
+ this.mobFileCache = null;
}
@Override
@@ -306,4 +316,14 @@ public class MockRegionServerServices implements RegionServerServices {
public double getCompactionPressure() {
return 0;
}
+
+ @Override
+ public BlockCache getBlockCache() {
+ return blockCache;
+ }
+
+ @Override
+ public MobFileCache getMobFileCache() {
+ return mobFileCache;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
index ce66e82..f602f7c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hbase.io.encoding;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -105,8 +107,9 @@ public class TestEncodedSeekers {
testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
}
LruBlockCache cache =
- (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
- cache.clearCache();
+ (LruBlockCache)CacheConfig.instantiateBlockCache(testUtil.getConfiguration());
+ assertNotNull(cache);
+ HRegion.setBlockCacheForTesting(cache);
// Need to disable default row bloom filter for this test to pass.
HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
setDataBlockEncoding(encoding).
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index 4080249..ce4189b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -46,14 +46,11 @@ public class TestBlockCacheReporting {
@Before
public void setUp() throws Exception {
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
this.conf = HBaseConfiguration.create();
}
@After
public void tearDown() throws Exception {
- // Let go of current block cache.
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
}
private void addDataAndHits(final BlockCache bc, final int count) {
@@ -80,7 +77,8 @@ public class TestBlockCacheReporting {
public void testBucketCache() throws JsonGenerationException, JsonMappingException, IOException {
this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);
- CacheConfig cc = new CacheConfig(this.conf);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ CacheConfig cc = new CacheConfig(this.conf, blockCache);
assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
logPerBlock(cc.getBlockCache());
final int count = 3;
@@ -96,7 +94,8 @@ public class TestBlockCacheReporting {
@Test
public void testLruBlockCache() throws JsonGenerationException, JsonMappingException, IOException {
- CacheConfig cc = new CacheConfig(this.conf);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ CacheConfig cc = new CacheConfig(this.conf, blockCache);
assertTrue(cc.isBlockCacheEnabled());
assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
assertTrue(cc.getBlockCache() instanceof LruBlockCache);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index ad08794..380ccce 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -156,14 +156,11 @@ public class TestCacheConfig {
@Before
public void setUp() throws Exception {
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
this.conf = HBaseConfiguration.create();
}
@After
public void tearDown() throws Exception {
- // Let go of current block cache.
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
}
/**
@@ -212,7 +209,8 @@ public class TestCacheConfig {
@Test
public void testCacheConfigDefaultLRUBlockCache() {
- CacheConfig cc = new CacheConfig(this.conf);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ CacheConfig cc = new CacheConfig(this.conf, blockCache);
assertTrue(cc.isBlockCacheEnabled());
assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
basicBlockCacheOps(cc, false, true);
@@ -251,7 +249,8 @@ public class TestCacheConfig {
private void doBucketCacheConfigTest() {
final int bcSize = 100;
this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
- CacheConfig cc = new CacheConfig(this.conf);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ CacheConfig cc = new CacheConfig(this.conf, blockCache);
basicBlockCacheOps(cc, false, false);
assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
// TODO: Assert sizes allocated are right and proportions.
@@ -284,7 +283,8 @@ public class TestCacheConfig {
assertTrue(lruExpectedSize < bcExpectedSize);
this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false);
- CacheConfig cc = new CacheConfig(this.conf);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ CacheConfig cc = new CacheConfig(this.conf, blockCache);
basicBlockCacheOps(cc, false, false);
assertTrue(cc.getBlockCache() instanceof LruBlockCache);
// TODO: Assert sizes allocated are right and proportions.
@@ -329,7 +329,8 @@ public class TestCacheConfig {
public void testCacheDataInL1() {
this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);
- CacheConfig cc = new CacheConfig(this.conf);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ CacheConfig cc = new CacheConfig(this.conf, blockCache);
assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache();
// Add a data block. Should go into L2, into the Bucket Cache, not the LruBlockCache.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfigMultipleInstances.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfigMultipleInstances.java
new file mode 100644
index 0000000..5fa677f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfigMultipleInstances.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests that multiple block cache instances within a single process behave as expected.
+ */
+@Category({IOTests.class, MediumTests.class})
+public class TestCacheConfigMultipleInstances {
+ private static final Log LOG = LogFactory.getLog(TestCacheConfigMultipleInstances.class);
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final int NUM_MASTERS = 1;
+ private static final int NUM_SLAVES = 2;
+ private static final String CF = "d";
+ private static Connection connection;
+ private static Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_SLAVES);
+ connection = TEST_UTIL.getConnection();
+ admin = connection.getAdmin();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ if (admin != null) admin.close();
+ if (connection != null) connection.close();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testMultipleBlockCacheInstances() throws Exception {
+ HRegionServer[] regionServers = new HRegionServer[NUM_SLAVES];
+ for (int i = 0; i < NUM_SLAVES; i++) {
+ regionServers[i] = TEST_UTIL.getHBaseCluster().getRegionServer(i);
+ }
+
+ TableName tableName = TableName.valueOf("test");
+ createTable(tableName, CF);
+
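+ // find which of the two region servers hosts the single region of the test table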
+ int servingRegionServer = findServingRegionServer(tableName, regionServers);
+
+ generateTestData(tableName);
+
+ long[] hitCountsPrev = new long[NUM_SLAVES];
+ long[] missCountsPrev = new long[NUM_SLAVES];
+ long[] hitCountsCur = new long[NUM_SLAVES];
+ long[] missCountsCur = new long[NUM_SLAVES];
+
+ // initial stats
+ gatherStats(regionServers, hitCountsPrev, missCountsPrev, hitCountsCur, missCountsCur);
+
+ // first scan. cache miss is expected
+ scanTestData(tableName);
+ gatherStats(regionServers, hitCountsPrev, missCountsPrev, hitCountsCur, missCountsCur);
+ for (int i = 0; i < NUM_SLAVES; i++) {
+ LOG.info("hitCountsPrev[" + i + "]=" + hitCountsPrev[i]
+ + ", missCountsPrev[" + i + "]=" + missCountsPrev[i]
+ + ", hitCountsCur[" + i + "]=" + hitCountsCur[i]
+ + ", missCountsCur[" + i + "]=" + missCountsCur[i]);
+ if (i == servingRegionServer) {
+ assertEquals(0, hitCountsCur[i] - hitCountsPrev[i]);
+ assertEquals(1, missCountsCur[i] - missCountsPrev[i]);
+ } else {
+ assertEquals(0, hitCountsCur[i] - hitCountsPrev[i]);
+ assertEquals(0, missCountsCur[i] - missCountsPrev[i]);
+ }
+ }
+
+ // second scan. cache hit is expected
+ scanTestData(tableName);
+ gatherStats(regionServers, hitCountsPrev, missCountsPrev, hitCountsCur, missCountsCur);
+ for (int i = 0; i < NUM_SLAVES; i++) {
+ LOG.info("hitCountsPrev[" + i + "]=" + hitCountsPrev[i]
+ + ", missCountsPrev[" + i + "]=" + missCountsPrev[i]
+ + ", hitCountsCur[" + i + "]=" + hitCountsCur[i]
+ + ", missCountsCur[" + i + "]=" + missCountsCur[i]);
+ if (i == servingRegionServer) {
+ assertEquals(1, hitCountsCur[i] - hitCountsPrev[i]);
+ assertEquals(0, missCountsCur[i] - missCountsPrev[i]);
+ } else {
+ assertEquals(0, hitCountsCur[i] - hitCountsPrev[i]);
+ assertEquals(0, missCountsCur[i] - missCountsPrev[i]);
+ }
+ }
+ }
+
+ private void scanTestData(TableName tableName) throws IOException {
+ try (Table table = connection.getTable(tableName)) {
+ Scan scan = new Scan();
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ scanner.next();
+ }
+ }
+ }
+
+ private void gatherStats(HRegionServer[] regionServers, long[] hitCountsPrev,
+ long[] missCountsPrev, long[] hitCountsCur, long[] missCountsCur) {
+ for (int i = 0; i < NUM_SLAVES; i++) {
+ hitCountsPrev[i] = hitCountsCur[i];
+ missCountsPrev[i] = missCountsCur[i];
+ hitCountsCur[i] = getHitCount(regionServers, i);
+ missCountsCur[i] = getMissCount(regionServers, i);
+ }
+ }
+
+ private long getHitCount(HRegionServer[] regionServers, int index) {
+ return regionServers[index].getCacheConfig()
+ .getBlockCache().getStats().getHitCount();
+ }
+
+ private long getMissCount(HRegionServer[] regionServers, int index) {
+ return regionServers[index].getCacheConfig()
+ .getBlockCache().getStats().getMissCount();
+ }
+
+ private void generateTestData(TableName tableName) throws IOException {
+ try (Table table = connection.getTable(tableName)) {
+ Put put = new Put("a".getBytes());
+ put.addColumn(CF.getBytes(), "c1".getBytes(), "a".getBytes());
+ table.put(put);
+
+ // flush memstore to use block cache
+ admin.flush(tableName);
+ }
+ }
+
+ private int findServingRegionServer(TableName tableName, HRegionServer[] regionServers)
+ throws IOException {
+ for (int i = 0; i < regionServers.length; i++) {
+ for (HRegionInfo hRegionInfo : admin.getOnlineRegions(regionServers[i].getServerName())) {
+ if (hRegionInfo.getTable().equals(tableName)) {
+ return i;
+ }
+ }
+ }
+ return -1;
+ }
+
+ private void createTable(TableName tableName, String family) throws IOException {
+ HTableDescriptor td = new HTableDescriptor(tableName);
+ HColumnDescriptor cd = new HColumnDescriptor(family);
+ td.addFamily(cd);
+ admin.createTable(td);
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 2c957ef..bb17830 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -155,7 +155,8 @@ public class TestCacheOnWrite {
Configuration conf = TEST_UTIL.getConfiguration();
List blockcaches = new ArrayList();
// default
- blockcaches.add(new CacheConfig(conf).getBlockCache());
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ blockcaches.add(blockCache);
// memory
BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
@@ -221,7 +222,6 @@ public class TestCacheOnWrite {
conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
cowType.modifyConf(conf);
fs = HFileSystem.get(conf);
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
cacheConf =
new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
cowType.shouldBeCached(BlockType.LEAF_INDEX),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index 1635310..68ef0bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -25,6 +25,7 @@ import java.util.Collection;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.client.Get;
@@ -95,8 +96,6 @@ public class TestForceCacheImportantBlocks {
@Before
public void setup() {
- // Make sure we make a new one each time.
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
HFile.dataBlockReadCnt.set(0);
}
@@ -109,6 +108,8 @@ public class TestForceCacheImportantBlocks {
setBloomFilterType(BLOOM_TYPE);
hcd.setBlocksize(BLOCK_SIZE);
hcd.setBlockCacheEnabled(cfCacheEnabled);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
+ HRegion.setBlockCacheForTesting(blockCache);
Region region = TEST_UTIL.createTestRegion(TABLE, hcd);
BlockCache cache = region.getStore(hcd.getName()).getCacheConfig().getBlockCache();
CacheStats stats = cache.getStats();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 8a843a3..a7d3f46 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -525,8 +525,8 @@ public class TestHFileBlockIndex {
public void testHFileWriterAndReader() throws IOException {
Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
"hfile_for_block_index");
- CacheConfig cacheConf = new CacheConfig(conf);
- BlockCache blockCache = cacheConf.getBlockCache();
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ CacheConfig cacheConf = new CacheConfig(conf, blockCache);
for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; ++testI) {
int indexBlockSize = INDEX_CHUNK_SIZES[testI];
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java
index 0067417..6281d5e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java
@@ -73,13 +73,11 @@ public class TestLazyDataBlockDecompression {
@Before
public void setUp() throws IOException {
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
fs = FileSystem.get(TEST_UTIL.getConfiguration());
}
@After
public void tearDown() {
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
fs = null;
}
@@ -144,15 +142,17 @@ public class TestLazyDataBlockDecompression {
.build();
LOG.info("context=" + context);
+ BlockCache blockCache;
+
// setup cache with lazy-decompression disabled.
Configuration lazyCompressDisabled = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
lazyCompressDisabled.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, cacheOnWrite);
lazyCompressDisabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, cacheOnWrite);
lazyCompressDisabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, cacheOnWrite);
lazyCompressDisabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, false);
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE =
+ blockCache =
new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressDisabled);
- CacheConfig cc = new CacheConfig(lazyCompressDisabled);
+ CacheConfig cc = new CacheConfig(lazyCompressDisabled, blockCache);
assertFalse(cc.shouldCacheDataCompressed());
assertTrue(cc.getBlockCache() instanceof LruBlockCache);
LruBlockCache disabledBlockCache = (LruBlockCache) cc.getBlockCache();
@@ -185,9 +185,9 @@ public class TestLazyDataBlockDecompression {
lazyCompressEnabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, cacheOnWrite);
lazyCompressEnabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, cacheOnWrite);
lazyCompressEnabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true);
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE =
- new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressEnabled);
- cc = new CacheConfig(lazyCompressEnabled);
+ blockCache =
+ new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressEnabled);
+ cc = new CacheConfig(lazyCompressEnabled, blockCache);
assertTrue("test improperly configured.", cc.shouldCacheDataCompressed());
assertTrue(cc.getBlockCache() instanceof LruBlockCache);
LruBlockCache enabledBlockCache = (LruBlockCache) cc.getBlockCache();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
index 053a27e..bef9757 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
@@ -56,8 +56,8 @@ public class TestPrefetch {
conf = TEST_UTIL.getConfiguration();
conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
fs = HFileSystem.get(conf);
- CacheConfig.blockCacheDisabled = false;
- cacheConf = new CacheConfig(conf);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ cacheConf = new CacheConfig(conf, blockCache);
}
@Test(timeout=60000)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
index bb6f899..b2588da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
@@ -84,6 +84,8 @@ public class TestScannerFromBucketCache {
conf.setFloat("hfile.block.cache.size", 0.2f);
conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f);
}
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ HRegion.setBlockCacheForTesting(blockCache);
tableName = TableName.valueOf(name.getMethodName());
}
@@ -92,7 +94,6 @@ public class TestScannerFromBucketCache {
EnvironmentEdgeManagerTestHelper.reset();
LOG.info("Cleaning test directory: " + test_util.getDataTestDir());
test_util.cleanupTestDir();
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
}
String getName() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
index 9c6bb38..f46450e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -99,6 +100,8 @@ public class TestScannerSelectionUsingKeyRange {
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(TABLE);
+ LruBlockCache cache = (LruBlockCache) CacheConfig.instantiateBlockCache(conf);
+ HRegion.setBlockCacheForTesting(cache);
Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf,
htd);
@@ -115,9 +118,6 @@ public class TestScannerSelectionUsingKeyRange {
}
Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
- CacheConfig.blockCacheDisabled = false;
- CacheConfig cacheConf = new CacheConfig(conf);
- LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
cache.clearCache();
InternalScanner scanner = region.getScanner(scan);
List results = new ArrayList();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
index 08b259d..8c92b93 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
@@ -131,9 +131,7 @@ public class TestScannerSelectionUsingTTL {
Scan scan = new Scan();
scan.setMaxVersions(Integer.MAX_VALUE);
- CacheConfig cacheConf = new CacheConfig(conf);
- LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
- cache.clearCache();
+ LruBlockCache cache = (LruBlockCache) CacheConfig.instantiateBlockCache(conf);
InternalScanner scanner = region.getScanner(scan);
List results = new ArrayList();
final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index eb8f803..6421e10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -43,9 +43,13 @@ import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
+import org.apache.hadoop.hbase.mob.MobCacheConfig;
+import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
@@ -127,6 +131,8 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
private final ServerName sn;
private final ZooKeeperWatcher zkw;
private final Configuration conf;
+ private final BlockCache blockCache;
+ private final MobFileCache mobFileCache;
private final Random random = new Random();
/**
@@ -182,6 +188,8 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
this.sn = sn;
this.conf = conf;
this.zkw = new ZooKeeperWatcher(conf, sn.toString(), this, true);
+ this.blockCache = CacheConfig.instantiateBlockCache(conf);
+ this.mobFileCache = MobCacheConfig.instantiateMobFileCache(conf);
}
/**
@@ -651,4 +659,14 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
public double getCompactionPressure() {
return 0;
}
+
+ @Override
+ public BlockCache getBlockCache() {
+ return blockCache;
+ }
+
+ @Override
+ public MobFileCache getMobFileCache() {
+ return mobFileCache;
+ }
}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCacheConfigMultipleInstances.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCacheConfigMultipleInstances.java
new file mode 100644
index 0000000..376ffcf
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCacheConfigMultipleInstances.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+
+/**
+ * Tests that multiple mob file cache instances within a single process behave as expected.
+ */
+@Category(MediumTests.class)
+public class TestMobCacheConfigMultipleInstances {
+ private static final Log LOG = LogFactory.getLog(TestMobCacheConfigMultipleInstances.class);
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final int NUM_MASTERS = 1;
+ private static final int NUM_SLAVES = 2;
+ private static final String CF = "d";
+ private static Connection connection;
+ private static Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_SLAVES);
+ connection = TEST_UTIL.getConnection();
+ admin = connection.getAdmin();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ if (admin != null) admin.close();
+ if (connection != null) connection.close();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testMultipleMobFileCacheInstances() throws Exception {
+ HRegionServer[] regionServers = new HRegionServer[NUM_SLAVES];
+ for (int i = 0; i < NUM_SLAVES; i++)
+ regionServers[i] = TEST_UTIL.getHBaseCluster().getRegionServer(i);
+
+ TableName tableName = TableName.valueOf("test");
+ createTable(tableName, CF);
+
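+ // The table is created with a single region, so exactly one of the two
+ // region servers hosts it; only that server's mob file cache should see traffic.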
+ int servingRegionServer = findServingRegionServer(tableName, regionServers);
+
+ generateTestData(tableName);
+
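+ // Track per-server access/miss counts before and after each scan so the
+ // assertions below can compare deltas rather than absolute values.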
+ long[] accessCountsPrev = new long[NUM_SLAVES];
+ long[] missCountsPrev = new long[NUM_SLAVES];
+ long[] accessCountsCur = new long[NUM_SLAVES];
+ long[] missCountsCur = new long[NUM_SLAVES];
+
+ // initial stats
+ gatherStats(regionServers, accessCountsPrev, missCountsPrev, accessCountsCur, missCountsCur);
+
+ // First scan: a cache miss is expected on the serving region server.
+ scanTestData(tableName);
+ gatherStats(regionServers, accessCountsPrev, missCountsPrev, accessCountsCur, missCountsCur);
+ for (int i = 0; i < NUM_SLAVES; i++) {
+ LOG.info("accessCountsPrev[" + i + "]=" + accessCountsPrev[i]
+ + ", missCountsPrev[" + i + "]=" + missCountsPrev[i]
+ + ", accessCountsCur[" + i + "]=" + accessCountsCur[i]
+ + ", missCountsCur[" + i + "]=" + missCountsCur[i]);
+ if (i == servingRegionServer) {
+ Assert.assertEquals(1, accessCountsCur[i] - accessCountsPrev[i]);
+ Assert.assertEquals(1, missCountsCur[i] - missCountsPrev[i]);
+ } else {
+ Assert.assertEquals(0, accessCountsCur[i] - accessCountsPrev[i]);
+ Assert.assertEquals(0, missCountsCur[i] - missCountsPrev[i]);
+ }
+ }
+
+ // Second scan: the file is now cached, so no additional miss is expected.
+ scanTestData(tableName);
+ gatherStats(regionServers, accessCountsPrev, missCountsPrev, accessCountsCur, missCountsCur);
+ for (int i = 0; i < NUM_SLAVES; i++) {
+ LOG.info("accessCountsPrev[" + i + "]=" + accessCountsPrev[i]
+ + ", missCountsPrev[" + i + "]=" + missCountsPrev[i]
+ + ", accessCountsCur[" + i + "]=" + accessCountsCur[i]
+ + ", missCountsCur[" + i + "]=" + missCountsCur[i]);
+ if (i == servingRegionServer) {
+ Assert.assertEquals(1, accessCountsCur[i] - accessCountsPrev[i]);
+ Assert.assertEquals(0, missCountsCur[i] - missCountsPrev[i]);
+ } else {
+ Assert.assertEquals(0, accessCountsCur[i] - accessCountsPrev[i]);
+ Assert.assertEquals(0, missCountsCur[i] - missCountsPrev[i]);
+ }
+ }
+ }
+
+ private void scanTestData(TableName tableName) throws IOException {
+ try (Table table = connection.getTable(tableName)) {
+ Scan scan = new Scan();
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ scanner.next();
+ }
+ }
+ }
+
+ private void gatherStats(HRegionServer[] regionServers, long[] accessCountsPrev,
+ long[] missCountsPrev, long[] accessCountsCur, long[] missCountsCur) {
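+ // Shift the current counts into the prev slots, then snapshot fresh counts
+ // from each region server's mob file cache.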
+ for (int i = 0; i < NUM_SLAVES; i++) {
+ accessCountsPrev[i] = accessCountsCur[i];
+ missCountsPrev[i] = missCountsCur[i];
+ accessCountsCur[i] = getAccessCount(regionServers, i);
+ missCountsCur[i] = getMissCount(regionServers, i);
+ }
+ }
+
+ private long getAccessCount(HRegionServer[] regionServers, int regionServerIndex) {
+ return regionServers[regionServerIndex].getMobCacheConfig()
+ .getMobFileCache().getAccessCount();
+ }
+
+ private long getMissCount(HRegionServer[] regionServers, int regionServerIndex) {
+ return regionServers[regionServerIndex].getMobCacheConfig()
+ .getMobFileCache().getMissCount();
+ }
+
+ private void generateTestData(TableName tableName) throws IOException {
+ try (Table table = connection.getTable(tableName)) {
+ Put put = new Put("a".getBytes());
+ put.addColumn(CF.getBytes(), "c1".getBytes(), "a".getBytes());
+ table.put(put);
+
+ // Flush the memstore so the value is persisted to a mob file; reads then go through the mob file cache.
+ admin.flush(tableName);
+ }
+ }
+
+ private int findServingRegionServer(TableName tableName, HRegionServer[] regionServers)
+ throws IOException {
+ int servingRegionServerIndex = -1;
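+ // Walk each region server's online regions until the one hosting the test table is found.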
+ for (HRegionInfo hRegionInfo : admin.getOnlineRegions(regionServers[0].getServerName())) {
+ if (hRegionInfo.getTable().equals(tableName)) {
+ servingRegionServerIndex = 0;
+ break;
+ }
+ }
+ if (servingRegionServerIndex < 0) {
+ for (HRegionInfo hRegionInfo : admin.getOnlineRegions(regionServers[1].getServerName())) {
+ if (hRegionInfo.getTable().equals(tableName)) {
+ servingRegionServerIndex = 1;
+ break;
+ }
+ }
+ }
+ return servingRegionServerIndex;
+ }
+
+ private void createTable(TableName tableName, String CF) throws IOException {
+ HTableDescriptor td = new HTableDescriptor(tableName);
+ HColumnDescriptor cd = new HColumnDescriptor(CF);
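+ // A mob threshold of 0 forces every non-empty value into a mob file, so reads
+ // must go through the mob file cache.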
+ cd.setMobEnabled(true);
+ cd.setMobThreshold(0);
+ td.addFamily(cd);
+ admin.createTable(td);
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
index 95fa1b9..41e4218 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.HMobStore;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -91,6 +93,8 @@ public class TestMobFileCache extends TestCase {
htd.addFamily(hcd2);
htd.addFamily(hcd3);
region = UTIL.createLocalHRegion(htd, null, null);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ HRegion.setBlockCacheForTesting(blockCache);
}
@Override
@@ -113,7 +117,7 @@ public class TestMobFileCache extends TestCase {
HColumnDescriptor hcd = new HColumnDescriptor(family);
hcd.setMaxVersions(4);
hcd.setMobEnabled(true);
- mobCacheConf = new MobCacheConfig(conf, hcd);
+ mobCacheConf = new MobCacheConfig(conf, hcd, region.getBlockCache(), region.getMobFileCache());
return createMobStoreFile(hcd);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
index 2dfbee6..5d60532 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
@@ -102,8 +102,9 @@ public class TestBlocksRead {
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
Path path = new Path(DIR + callingMethod);
+ blockCache = CacheConfig.instantiateBlockCache(conf);
+ HRegion.setBlockCacheForTesting(blockCache);
Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
- blockCache = new CacheConfig(conf).getBlockCache();
return r;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
index b2ba97c..86ed30d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -89,11 +90,14 @@ public class TestBlocksScanned extends HBaseTestCase {
}
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ HRegion.setBlockCacheForTesting(blockCache);
Region r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration());
addContent(r, FAMILY, COL);
r.flush(true);
- CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
+ assertNotNull(blockCache);
+ CacheStats stats = blockCache.getStats();
long before = stats.getHitCount() + stats.getMissCount();
// Do simple test of getting one row only first.
Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index da3c17d..b5cdf1b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -173,6 +173,8 @@ public class TestCacheOnWriteInSchema {
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
walFactory = new WALFactory(conf, null, id);
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ HRegion.setBlockCacheForTesting(blockCache);
region = TEST_UTIL.createLocalHRegion(info, htd,
walFactory.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()));
store = new HStore(region, hcd, conf);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
index 5b86169..72ecde8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
@@ -134,8 +134,8 @@ public class TestCompoundBloomFilter {
fs = FileSystem.get(conf);
- cacheConf = new CacheConfig(conf);
- blockCache = cacheConf.getBlockCache();
+ blockCache = CacheConfig.instantiateBlockCache(conf);
+ cacheConf = new CacheConfig(conf, blockCache);
assertNotNull(blockCache);
}
@@ -295,7 +295,7 @@ public class TestCompoundBloomFilter {
conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
BLOOM_BLOCK_SIZES[t]);
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
- cacheConf = new CacheConfig(conf);
+ cacheConf = new CacheConfig(conf, blockCache);
HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
StoreFile.Writer w = new StoreFile.WriterBuilder(conf, cacheConf, fs)
.withOutputDir(TEST_UTIL.getDataTestDir())
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 47b6b5c..d11b406 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -54,7 +54,9 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.mob.MobCacheConfig;
import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
@@ -155,6 +157,8 @@ public class TestHMobStore {
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
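+ // Create a dedicated mob file cache and inject it; there is no longer a process-wide instance to fall back on.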
+ MobFileCache mobFileCache = MobCacheConfig.instantiateMobFileCache(conf);
+ HRegion.setMobFileCacheForTesting(mobFileCache);
final Configuration walConf = new Configuration(conf);
FSUtils.setRootDir(walConf, basedir);
final WALFactory wals = new WALFactory(walConf, null, methodName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
index f9ffc88..7ed2d04 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
@@ -47,7 +47,9 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobCacheConfig;
import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -108,6 +110,8 @@ public class TestMobStoreCompaction {
hcd.setMaxVersions(1);
htd.modifyFamily(hcd);
+ MobFileCache mobFileCache = MobCacheConfig.instantiateMobFileCache(conf);
+ HRegion.setMobFileCacheForTesting(mobFileCache);
region = UTIL.createLocalHRegion(htd, null, null);
fs = FileSystem.get(conf);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
index fc47d7e..a02affe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -106,6 +108,8 @@ public class TestRecoveredEdits {
LOG.info("Region directory already exists. Deleting.");
fs.delete(hrfs.getRegionDir(), true);
}
+ BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
+ HRegion.setBlockCacheForTesting(blockCache);
HRegion region = HRegion.createHRegion(hri, hbaseRootDir, conf, htd, null);
assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
List storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index 26b3293..fe6bf8d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -72,6 +73,8 @@ import com.google.common.collect.ImmutableList;
@Category({RegionServerTests.class, SmallTests.class})
public class TestSplitTransaction {
private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final BlockCache blockCache =
+ CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
private final Path testdir =
TEST_UTIL.getDataTestDir(this.getClass().getName());
private HRegion parent;
@@ -85,7 +88,7 @@ public class TestSplitTransaction {
private static boolean preRollBackCalled = false;
private static boolean postRollBackCalled = false;
-
+
@Before public void setup() throws IOException {
this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
TEST_UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
@@ -93,7 +96,8 @@ public class TestSplitTransaction {
final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
FSUtils.setRootDir(walConf, this.testdir);
this.wals = new WALFactory(walConf, null, this.getClass().getName());
-
+
+ HRegion.setBlockCacheForTesting(blockCache);
this.parent = createRegion(this.testdir, this.wals);
RegionCoprocessorHost host = new RegionCoprocessorHost(this.parent, null, TEST_UTIL.getConfiguration());
this.parent.setCoprocessorHost(host);
@@ -216,8 +220,7 @@ public class TestSplitTransaction {
// Pretend region's blocks are not in the cache, used for
// testWholesomeSplitWithHFileV1
- CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
- ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
+ ((LruBlockCache) blockCache).clearCache();
// Start transaction.
SplitTransactionImpl st = prepareGOOD_SPLIT_ROW();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 85e4439..ce6f249 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -847,7 +847,7 @@ public class TestStoreFile extends HBaseTestCase {
Path baseDir = new Path(new Path(testDir, "7e0102"),"twoCOWEOC");
// Grab the block cache and get the initial hit/miss counts
- BlockCache bc = new CacheConfig(conf).getBlockCache();
+ BlockCache bc = CacheConfig.instantiateBlockCache(conf);
assertNotNull(bc);
CacheStats cs = bc.getStats();
long startHit = cs.getHitCount();
@@ -856,7 +856,7 @@ public class TestStoreFile extends HBaseTestCase {
// Let's write a StoreFile with three blocks, with cache on write off
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
- CacheConfig cacheConf = new CacheConfig(conf);
+ CacheConfig cacheConf = new CacheConfig(conf, bc);
Path pathCowOff = new Path(baseDir, "123456789");
StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
@@ -878,7 +878,7 @@ public class TestStoreFile extends HBaseTestCase {
// Now write a StoreFile with three blocks, with cache on write on
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
- cacheConf = new CacheConfig(conf);
+ cacheConf = new CacheConfig(conf, bc);
Path pathCowOn = new Path(baseDir, "123456788");
writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
@@ -935,7 +935,7 @@ public class TestStoreFile extends HBaseTestCase {
// Let's close the first file with evict on close turned on
conf.setBoolean("hbase.rs.evictblocksonclose", true);
- cacheConf = new CacheConfig(conf);
+ cacheConf = new CacheConfig(conf, bc);
hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
BloomType.NONE);
reader = hsf.createReader();
@@ -949,7 +949,7 @@ public class TestStoreFile extends HBaseTestCase {
// Let's close the second file with evict on close turned off
conf.setBoolean("hbase.rs.evictblocksonclose", false);
- cacheConf = new CacheConfig(conf);
+ cacheConf = new CacheConfig(conf, bc);
hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
BloomType.NONE);
reader = hsf.createReader();