diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 7f85dc7..22cb2a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -545,7 +545,6 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable */ @InterfaceAudience.Private + @Deprecated public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1"; + @Deprecated private static final Bytes CACHE_DATA_IN_L1_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_IN_L1)); /** @@ -233,6 +235,7 @@ public class ColumnFamilyDescriptorBuilder { * sense if more than one tier in operations: i.e. if we have an L1 and a L2. * This will be the cases if we are using BucketCache. */ + @Deprecated public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false; /** @@ -444,8 +447,11 @@ public class ColumnFamilyDescriptorBuilder { return this; } + /** + * @deprecated Since 2.0 and will be removed in 3.0. This is a noop from 2.0. + */ + @Deprecated public ColumnFamilyDescriptorBuilder setCacheDataInL1(boolean value) { - desc.setCacheDataInL1(value); return this; } @@ -1011,21 +1017,6 @@ public class ColumnFamilyDescriptorBuilder { } @Override - public boolean isCacheDataInL1() { - return getStringOrDefault(CACHE_DATA_IN_L1_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_IN_L1); - } - - /** - * @param value true if we should cache data blocks in the L1 cache (if - * block cache deploy has more than one tier; e.g. we are using - * CombinedBlockCache). - * @return this (for chained invocation) - */ - public ModifyableColumnFamilyDescriptor setCacheDataInL1(boolean value) { - return setValue(CACHE_DATA_IN_L1_BYTES, Boolean.toString(value)); - } - - @Override public boolean isCacheIndexesOnWrite() { return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE); } diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index be23dc8..a9b29a3 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -901,13 +901,6 @@ possible configurations would overwhelm and obscure the important. - <property> - <name>hbase.bucketcache.combinedcache.enabled</name> - <value>true</value> - <description>Whether or not the bucketcache is used in league with the LRU - on-heap block cache. In this mode, indices and blooms are kept in the LRU - blockcache and the data blocks are kept in the bucketcache.</description>
- </property> <property> <name>hbase.bucketcache.size</name> <description>A float that EITHER represents a percentage of total heap memory diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index c05499c..b12ac1d 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -110,10 +110,7 @@ public class MemcachedBlockCache implements BlockCache { } @Override - public void cacheBlock(BlockCacheKey cacheKey, - Cacheable buf, - boolean inMemory, - boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { cacheBlock(cacheKey, buf); } @@ -288,10 +285,4 @@ public class MemcachedBlockCache implements BlockCache { return MAX_SIZE; } } - - @Override - public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { - // Not doing reference counting. All blocks here are EXCLUSIVE - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 20ec8ee..103d113 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -34,11 +34,8 @@ public interface BlockCache extends Iterable<CachedBlock> { * @param cacheKey The block's cache key. * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory - * @param cacheDataInL1 If multi-tier block cache deploy -- i.e. has an L1 and L2 tier -- then - * if this flag is true, cache data blocks up in the L1 tier (meta blocks are probably being - * cached in L1 already). */ - void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, boolean cacheDataInL1); + void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory); /** * Add block to cache (defaults to not in-memory).
@@ -146,5 +143,5 @@ public interface BlockCache extends Iterable<CachedBlock> { * @param cacheKey the cache key of the block * @param block the hfileblock to be returned */ - void returnBlock(BlockCacheKey cacheKey, Cacheable block); + default void returnBlock(BlockCacheKey cacheKey, Cacheable block){}; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index b89205c..1d4ccf9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; @@ -109,14 +108,6 @@ public class CacheConfig { public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; - /** - * If the bucket cache is used in league with the lru on-heap block cache (meta blocks such - * as indices and blooms are kept in the lru blockcache and the data blocks in the - * bucket cache). - */ - public static final String BUCKET_CACHE_COMBINED_KEY = - "hbase.bucketcache.combinedcache.enabled"; - public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads"; public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength"; @@ -129,7 +120,6 @@ public class CacheConfig { /** * Defaults for Bucket cache */ - public static final boolean DEFAULT_BUCKET_CACHE_COMBINED = true; public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3; public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64; @@ -218,13 +208,6 @@ public class CacheConfig { /** Whether data blocks should be prefetched into the cache */ private final boolean prefetchOnOpen; - /** - * If true and if more than one tier in this cache deploy -- e.g. CombinedBlockCache has an L1 - * and an L2 tier -- then cache data blocks up in the L1 tier (The meta blocks are likely being - * cached up in L1 already. At least this is the case if CombinedBlockCache).
- */ - private boolean cacheDataInL1; - private final boolean dropBehindCompaction; /** @@ -251,8 +234,6 @@ public class CacheConfig { conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(), - conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1, - ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(), conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT) ); LOG.info("Created cacheConfig for " + family.getNameAsString() + ": " + this); @@ -276,8 +257,6 @@ public class CacheConfig { conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE), conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN), - conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1, - ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1), conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT) ); LOG.info("Created cacheConfig: " + this); @@ -305,7 +284,7 @@ public class CacheConfig { final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite, final boolean cacheBloomsOnWrite, final boolean evictOnClose, final boolean cacheDataCompressed, final boolean prefetchOnOpen, - final boolean cacheDataInL1, final boolean dropBehindCompaction) { + final boolean dropBehindCompaction) { this.blockCache = blockCache; this.cacheDataOnRead = cacheDataOnRead; this.inMemory = inMemory; @@ -315,7 +294,6 @@ public class CacheConfig { this.evictOnClose = evictOnClose; this.cacheDataCompressed = cacheDataCompressed; this.prefetchOnOpen = prefetchOnOpen; - this.cacheDataInL1 = cacheDataInL1; this.dropBehindCompaction = dropBehindCompaction; } @@ -328,12 +306,11 @@ public class CacheConfig { cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite, cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose, cacheConf.cacheDataCompressed, cacheConf.prefetchOnOpen, - cacheConf.cacheDataInL1, cacheConf.dropBehindCompaction); + cacheConf.dropBehindCompaction); } private CacheConfig() { - this(null, false, false, false, false, false, - false, false, false, false, false); + this(null, false, false, false, false, false, false, false, false, false); } /** @@ -387,13 +364,6 @@ public class CacheConfig { } /** - * @return True if cache data blocks in L1 tier (if more than one tier in block cache deploy). - */ - public boolean isCacheDataInL1() { - return isBlockCacheEnabled() && this.cacheDataInL1; - } - - /** * @return true if data blocks should be written to the cache when an HFile is * written, false if not */ @@ -412,16 +382,6 @@ public class CacheConfig { } /** - * Only used for testing. - * @param cacheDataInL1 Whether to cache data blocks up in l1 (if a multi-tier cache - * implementation). - */ - @VisibleForTesting - public void setCacheDataInL1(boolean cacheDataInL1) { - this.cacheDataInL1 = cacheDataInL1; - } - - /** * @return true if index blocks should be written to the cache when an HFile * is written, false if not */ @@ -547,8 +507,8 @@ public class CacheConfig { // Clear this if in tests you'd make more than one block cache instance. 
@VisibleForTesting static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE; - private static LruBlockCache GLOBAL_L1_CACHE_INSTANCE = null; - private static BlockCache GLOBAL_L2_CACHE_INSTANCE = null; + private static LruBlockCache LRU_CACHE_INSTANCE = null; + private static BlockCache SECOND_CACHE_INSTANCE = null;// Can be BucketCache or External cache. /** Boolean whether we have disabled the block cache entirely. */ @VisibleForTesting @@ -558,20 +518,20 @@ public class CacheConfig { * @param c Configuration to use. * @return An L1 instance. Currently an instance of LruBlockCache. */ - public static LruBlockCache getL1(final Configuration c) { - return getL1Internal(c); + public static LruBlockCache getLRUCache(final Configuration c) { + return getLRUCacheInternal(c); } - public CacheStats getL1Stats() { - if (GLOBAL_L1_CACHE_INSTANCE != null) { - return GLOBAL_L1_CACHE_INSTANCE.getStats(); + public CacheStats getLruCacheStats() { + if (LRU_CACHE_INSTANCE != null) { + return LRU_CACHE_INSTANCE.getStats(); } return null; } - public CacheStats getL2Stats() { - if (GLOBAL_L2_CACHE_INSTANCE != null) { - return GLOBAL_L2_CACHE_INSTANCE.getStats(); + public CacheStats getSecondCacheStats() { + if (SECOND_CACHE_INSTANCE != null) { + return SECOND_CACHE_INSTANCE.getStats(); } return null; } @@ -580,8 +540,8 @@ public class CacheConfig { * @param c Configuration to use. * @return An L1 instance. Currently an instance of LruBlockCache. */ - private synchronized static LruBlockCache getL1Internal(final Configuration c) { - if (GLOBAL_L1_CACHE_INSTANCE != null) return GLOBAL_L1_CACHE_INSTANCE; + private synchronized static LruBlockCache getLRUCacheInternal(final Configuration c) { + if (LRU_CACHE_INSTANCE != null) return LRU_CACHE_INSTANCE; final long lruCacheSize = MemorySizeUtil.getLruCacheSize(c); if (lruCacheSize < 0) { blockCacheDisabled = true; @@ -590,33 +550,14 @@ public class CacheConfig { int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); LOG.info("Allocating LruBlockCache size=" + StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); - GLOBAL_L1_CACHE_INSTANCE = new LruBlockCache(lruCacheSize, blockSize, true, c); - return GLOBAL_L1_CACHE_INSTANCE; + LRU_CACHE_INSTANCE = new LruBlockCache(lruCacheSize, blockSize, true, c); + return LRU_CACHE_INSTANCE; } - /** - * @param c Configuration to use. - * @return Returns L2 block cache instance (for now it is BucketCache BlockCache all the time) - * or null if not supposed to be a L2. - */ - @VisibleForTesting - static BlockCache getL2(final Configuration c) { - final boolean useExternal = c.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); + private static BlockCache getExternalBlockcache(Configuration c) { if (LOG.isDebugEnabled()) { - LOG.debug("Trying to use " + (useExternal?" External":" Internal") + " l2 cache"); - } - - // If we want to use an external block cache then create that. - if (useExternal) { - GLOBAL_L2_CACHE_INSTANCE = getExternalBlockcache(c); - } else { - // otherwise use the bucket cache. - GLOBAL_L2_CACHE_INSTANCE = getBucketCache(c); + LOG.debug("Trying to use External l2 cache"); } - return GLOBAL_L2_CACHE_INSTANCE; - } - - private static BlockCache getExternalBlockcache(Configuration c) { Class klass = null; // Get the class, from the config. s @@ -642,7 +583,8 @@ public class CacheConfig { } - private static BlockCache getBucketCache(Configuration c) { + @VisibleForTesting + static BucketCache getBucketCache(Configuration c) { // Check for L2. 
ioengine name must be non-null. String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null; @@ -705,30 +647,25 @@ public class CacheConfig { public static synchronized BlockCache instantiateBlockCache(Configuration conf) { if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE; if (blockCacheDisabled) return null; - LruBlockCache l1 = getL1Internal(conf); - // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the call. + LruBlockCache lruCache = getLRUCacheInternal(conf); + // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the + // call. if (blockCacheDisabled) return null; - BlockCache l2 = getL2(conf); - if (l2 == null) { - GLOBAL_BLOCK_CACHE_INSTANCE = l1; + boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); + if (useExternal) { + SECOND_CACHE_INSTANCE = getExternalBlockcache(conf); + GLOBAL_BLOCK_CACHE_INSTANCE = SECOND_CACHE_INSTANCE == null ? lruCache + : new InclusiveCombinedBlockCache(lruCache, SECOND_CACHE_INSTANCE); } else { - boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); - boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY, - DEFAULT_BUCKET_CACHE_COMBINED); - if (useExternal) { - GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2); - } else { - if (combinedWithLru) { - GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2); - } else { - // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler - // mechanism. It is a little ugly but works according to the following: when the - // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get - // a block from the L1 cache, if not in L1, we will search L2. - GLOBAL_BLOCK_CACHE_INSTANCE = l1; - } + // otherwise use the bucket cache. + SECOND_CACHE_INSTANCE = getBucketCache(conf); + if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) { + // Non combined mode is off from 2.0 + LOG.warn( + "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); } - l1.setVictimCache(l2); + GLOBAL_BLOCK_CACHE_INSTANCE = SECOND_CACHE_INSTANCE == null ? lruCache + : new CombinedBlockCache(lruCache, SECOND_CACHE_INSTANCE); } return GLOBAL_BLOCK_CACHE_INSTANCE; } @@ -736,8 +673,8 @@ public class CacheConfig { // Supposed to use only from tests. Some tests want to reinit the Global block cache instance @VisibleForTesting static synchronized void clearGlobalInstances() { - GLOBAL_L1_CACHE_INSTANCE = null; - GLOBAL_L2_CACHE_INSTANCE = null; + LRU_CACHE_INSTANCE = null; + SECOND_CACHE_INSTANCE = null; GLOBAL_BLOCK_CACHE_INSTANCE = null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 201a41b..17e672f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -31,49 +31,46 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe /** * CombinedBlockCache is an abstraction layer that combines * {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used - * to cache bloom blocks and index blocks. 
The larger l2Cache is used to + * to cache bloom blocks and index blocks. The larger Cache is used to * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads - * first from the smaller lruCache before looking for the block in the l2Cache. Blocks evicted - * from lruCache are put into the bucket cache. + * first from the smaller lruCache before looking for the block in the l2Cache. * Metrics are the combined size and hits and misses of both caches. - * */ @InterfaceAudience.Private public class CombinedBlockCache implements ResizableBlockCache, HeapSize { protected final LruBlockCache lruCache; - protected final BlockCache l2Cache; + protected final BlockCache secondCache; protected final CombinedCacheStats combinedCacheStats; - public CombinedBlockCache(LruBlockCache lruCache, BlockCache l2Cache) { + public CombinedBlockCache(LruBlockCache lruCache, BlockCache secondCache) { this.lruCache = lruCache; - this.l2Cache = l2Cache; + this.secondCache = secondCache; this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(), - l2Cache.getStats()); + secondCache.getStats()); } @Override public long heapSize() { long l2size = 0; - if (l2Cache instanceof HeapSize) { - l2size = ((HeapSize) l2Cache).heapSize(); + if (secondCache instanceof HeapSize) { + l2size = ((HeapSize) secondCache).heapSize(); } return lruCache.heapSize() + l2size; } @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - final boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA; - if (metaBlock || cacheDataInL1) { - lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); + if (metaBlock) { + lruCache.cacheBlock(cacheKey, buf, inMemory); } else { - l2Cache.cacheBlock(cacheKey, buf, inMemory, false); + secondCache.cacheBlock(cacheKey, buf, inMemory); } } @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { - cacheBlock(cacheKey, buf, false, false); + cacheBlock(cacheKey, buf, false); } @Override @@ -81,20 +78,22 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { boolean repeat, boolean updateCacheMetrics) { // TODO: is there a hole here, or just awkwardness since in the lruCache getBlock // we end up calling l2Cache.getBlock. + // We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting + // passed always. return lruCache.containsBlock(cacheKey)? 
lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): - l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + secondCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override public boolean evictBlock(BlockCacheKey cacheKey) { - return lruCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey); + return lruCache.evictBlock(cacheKey) || secondCache.evictBlock(cacheKey); } @Override public int evictBlocksByHfileName(String hfileName) { return lruCache.evictBlocksByHfileName(hfileName) - + l2Cache.evictBlocksByHfileName(hfileName); + + secondCache.evictBlocksByHfileName(hfileName); } @Override @@ -105,42 +104,42 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public void shutdown() { lruCache.shutdown(); - l2Cache.shutdown(); + secondCache.shutdown(); } @Override public long size() { - return lruCache.size() + l2Cache.size(); + return lruCache.size() + secondCache.size(); } @Override public long getMaxSize() { - return lruCache.getMaxSize() + l2Cache.getMaxSize(); + return lruCache.getMaxSize() + secondCache.getMaxSize(); } @Override public long getCurrentDataSize() { - return lruCache.getCurrentDataSize() + l2Cache.getCurrentDataSize(); + return lruCache.getCurrentDataSize() + secondCache.getCurrentDataSize(); } @Override public long getFreeSize() { - return lruCache.getFreeSize() + l2Cache.getFreeSize(); + return lruCache.getFreeSize() + secondCache.getFreeSize(); } @Override public long getCurrentSize() { - return lruCache.getCurrentSize() + l2Cache.getCurrentSize(); + return lruCache.getCurrentSize() + secondCache.getCurrentSize(); } @Override public long getBlockCount() { - return lruCache.getBlockCount() + l2Cache.getBlockCount(); + return lruCache.getBlockCount() + secondCache.getBlockCount(); } @Override public long getDataBlockCount() { - return lruCache.getDataBlockCount() + l2Cache.getDataBlockCount(); + return lruCache.getDataBlockCount() + secondCache.getDataBlockCount(); } public static class CombinedCacheStats extends CacheStats { @@ -363,7 +362,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public BlockCache[] getBlockCaches() { - return new BlockCache [] {this.lruCache, this.l2Cache}; + return new BlockCache [] {this.lruCache, this.secondCache}; } @Override @@ -374,11 +373,12 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { // returnBlock is meaningful for L2 cache alone. - this.l2Cache.returnBlock(cacheKey, block); + this.secondCache.returnBlock(cacheKey, block); } @VisibleForTesting public int getRefCount(BlockCacheKey cacheKey) { - return ((BucketCache) this.l2Cache).getRefCount(cacheKey); + return (this.secondCache instanceof BucketCache) + ? 
((BucketCache) this.secondCache).getRefCount(cacheKey) : 0; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index f216f42..5021b4d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1408,8 +1408,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // Cache the block if (cacheBlock) { - cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock, - cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1()); + cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock, cacheConf.isInMemory()); } return metaBlock; @@ -1495,7 +1494,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) { cacheConf.getBlockCache().cacheBlock(cacheKey, cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked, - cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1()); + cacheConf.isInMemory()); } if (updateCacheMetrics && hfileBlock.getBlockType().isData()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java index 4d7126e..71a4290 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java @@ -19,13 +19,13 @@ package org.apache.hadoop.hbase.io.hfile; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class InclusiveCombinedBlockCache extends CombinedBlockCache implements BlockCache { +@InterfaceAudience.Private +public class InclusiveCombinedBlockCache extends CombinedBlockCache { public InclusiveCombinedBlockCache(LruBlockCache l1, BlockCache l2) { super(l1,l2); + l1.setVictimCache(l2); } @Override @@ -46,13 +46,19 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache implements B * @param cacheDataInL1 This is totally ignored. */ @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - final boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { // This is the inclusive part of the combined block cache. // Every block is placed into both block caches. - lruCache.cacheBlock(cacheKey, buf, inMemory, true); + lruCache.cacheBlock(cacheKey, buf, inMemory); // This assumes that insertion into the L2 block cache is either async or very fast. 
- l2Cache.cacheBlock(cacheKey, buf, inMemory, true); + secondCache.cacheBlock(cacheKey, buf, inMemory); + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + boolean l1Result = this.lruCache.evictBlock(cacheKey); + boolean l2Result = this.secondCache.evictBlock(cacheKey); + return l1Result || l2Result; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 0fde0a7..eab4d08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -223,7 +223,11 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { /** Whether in-memory hfile's data block has higher priority when evicting */ private boolean forceInMemory; - /** Where to send victims (blocks evicted/missing from the cache) */ + /** + * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an + * external cache as L2. + * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + */ private BlockCache victimHandler = null; /** @@ -360,9 +364,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param inMemory if block is in-memory */ @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - final boolean cacheDataInL1) { - + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { if (buf.heapSize() > maxBlockSize) { // If there are a lot of blocks that are too // big this can make the logs way too noisy. @@ -448,7 +450,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param buf block buffer */ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { - cacheBlock(cacheKey, buf, false, false); + cacheBlock(cacheKey, buf, false); } /** @@ -499,7 +501,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { if (result instanceof HFileBlock && ((HFileBlock) result).usesSharedMemory()) { result = ((HFileBlock) result).deepClone(); } - cacheBlock(cacheKey, result, /* inMemory = */ false, /* cacheData = */ true); + cacheBlock(cacheKey, result, /* inMemory = */ false); } return result; } @@ -577,14 +579,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { // update the stats counter. stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary()); if (victimHandler != null) { - if (victimHandler instanceof BucketCache) { - boolean wait = getCurrentSize() < acceptableSize(); - boolean inMemory = block.getPriority() == BlockPriority.MEMORY; - ((BucketCache) victimHandler).cacheBlockWithWait(block.getCacheKey(), block.getBuffer(), - inMemory, true, wait); - } else { - victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer()); - } + victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer()); } } return block.heapSize(); @@ -1179,10 +1174,6 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return map; } - BlockCache getVictimHandler() { - return this.victimHandler; - } - @Override @JsonIgnore public BlockCache[] getBlockCaches() { @@ -1190,16 +1181,4 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return new BlockCache[] {this, this.victimHandler}; return null; } - - @Override - public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { - // There is no SHARED type here in L1. 
But the block might have been served from the Victim - // handler L2 cache. (when the Combined mode = false). So just try return this block to - // L2 victim handler cache. - // Note : In case of CombinedBlockCache, we will have this victimHandler configured for L1 - // cache. But CombinedBlockCache will only call returnBlock on L2 cache. - if (this.victimHandler != null) { - this.victimHandler.returnBlock(cacheKey, block); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index b0011d7..8ed7b37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -397,7 +397,7 @@ public class BucketCache implements BlockCache, HeapSize { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { - cacheBlock(cacheKey, buf, false, false); + cacheBlock(cacheKey, buf, false); } /** @@ -408,9 +408,8 @@ public class BucketCache implements BlockCache, HeapSize { * @param cacheDataInL1 */ @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, - final boolean cacheDataInL1) { - cacheBlockWithWait(cacheKey, cachedItem, inMemory, cacheDataInL1, wait_when_cache); + public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory) { + cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache); } /** @@ -421,23 +420,18 @@ public class BucketCache implements BlockCache, HeapSize { * @param wait if true, blocking wait when queue is full */ public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, - boolean cacheDataInL1, boolean wait) { + boolean wait) { if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem); if (!cacheEnabled) { return; } if (backingMap.containsKey(cacheKey)) { - /* - * Compare already cached block only if lruBlockCache is not used to cache data blocks - */ - if (!cacheDataInL1) { - Cacheable existingBlock = getBlock(cacheKey, false, false, false); - if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) { - throw new RuntimeException("Cached block contents differ, which should not have happened." - + "cacheKey:" + cacheKey); - } - } + Cacheable existingBlock = getBlock(cacheKey, false, false, false); + if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) { + throw new RuntimeException("Cached block contents differ, which should not have happened." + + "cacheKey:" + cacheKey); + } String msg = "Caching an already cached block: " + cacheKey; msg += ". 
This is harmless and can happen in rare cases (see HBASE-8547)"; LOG.warn(msg); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index b64937e..5ccd13e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -107,9 +107,9 @@ public class HeapMemoryManager { public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher, Server server, RegionServerAccounting regionServerAccounting) { - ResizableBlockCache l1Cache = CacheConfig.getL1(conf); - if (l1Cache != null) { - return new HeapMemoryManager(l1Cache, memStoreFlusher, server, regionServerAccounting); + ResizableBlockCache lruCache = CacheConfig.getLRUCache(conf); + if (lruCache != null) { + return new HeapMemoryManager(lruCache, memStoreFlusher, server, regionServerAccounting); } return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 75d8e56..42567e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -153,8 +153,8 @@ class MetricsRegionServerWrapperImpl private synchronized void initBlockCache() { CacheConfig cacheConfig = this.regionServer.cacheConfig; if (cacheConfig != null) { - l1Stats = cacheConfig.getL1Stats(); - l2Stats = cacheConfig.getL2Stats(); + l1Stats = cacheConfig.getLruCacheStats(); + l2Stats = cacheConfig.getSecondCacheStats(); if (this.blockCache == null) { this.blockCache = cacheConfig.getBlockCache(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index 710d408..56c52ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.MemoryUsage; import java.nio.ByteBuffer; -import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -186,7 +185,7 @@ public class TestCacheConfig { Cacheable c = new DataCacheEntry(); // Do asserts on block counting. long initialBlockCount = bc.getBlockCount(); - bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1()); + bc.cacheBlock(bck, c, cc.isInMemory()); assertEquals(doubling? 2: 1, bc.getBlockCount() - initialBlockCount); bc.evictBlock(bck); assertEquals(initialBlockCount, bc.getBlockCount()); @@ -194,7 +193,7 @@ public class TestCacheConfig { // buffers do lazy allocation so sizes are off on first go around. 
if (sizing) { long originalSize = bc.getCurrentSize(); - bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1()); + bc.cacheBlock(bck, c, cc.isInMemory()); assertTrue(bc.getCurrentSize() > originalSize); bc.evictBlock(bck); long size = bc.getCurrentSize(); @@ -202,19 +201,6 @@ public class TestCacheConfig { } } - /** - * @param cc - * @param filename - * @return - */ - private long cacheDataBlock(final CacheConfig cc, final String filename) { - BlockCacheKey bck = new BlockCacheKey(filename, 0); - Cacheable c = new DataCacheEntry(); - // Do asserts on block counting. - cc.getBlockCache().cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1()); - return cc.getBlockCache().getBlockCount(); - } - @Test public void testDisableCacheDataBlock() throws IOException { Configuration conf = HBaseConfiguration.create(); @@ -347,14 +333,14 @@ public class TestCacheConfig { long bcExpectedSize = 100 * 1024 * 1024; // MB. assertTrue(lruExpectedSize < bcExpectedSize); this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize); - this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false); CacheConfig cc = new CacheConfig(this.conf); basicBlockCacheOps(cc, false, false); - assertTrue(cc.getBlockCache() instanceof LruBlockCache); + assertTrue(cc.getBlockCache() instanceof CombinedBlockCache); // TODO: Assert sizes allocated are right and proportions. - LruBlockCache lbc = (LruBlockCache)cc.getBlockCache(); + CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache(); + LruBlockCache lbc = cbc.lruCache; assertEquals(lruExpectedSize, lbc.getMaxSize()); - BlockCache bc = lbc.getVictimHandler(); + BlockCache bc = cbc.secondCache; // getMaxSize comes back in bytes but we specified size in MB assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize()); // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2. @@ -362,7 +348,7 @@ public class TestCacheConfig { long initialL2BlockCount = bc.getBlockCount(); Cacheable c = new DataCacheEntry(); BlockCacheKey bck = new BlockCacheKey("bck", 0); - lbc.cacheBlock(bck, c, false, false); + lbc.cacheBlock(bck, c, false); assertEquals(initialL1BlockCount + 1, lbc.getBlockCount()); assertEquals(initialL2BlockCount, bc.getBlockCount()); // Force evictions by putting in a block too big. @@ -381,32 +367,6 @@ public class TestCacheConfig { // The eviction thread in lrublockcache needs to run. while (initialL1BlockCount != lbc.getBlockCount()) Threads.sleep(10); assertEquals(initialL1BlockCount, lbc.getBlockCount()); - long count = bc.getBlockCount(); - assertTrue(initialL2BlockCount + 1 <= count); - } - - /** - * Test the cacheDataInL1 flag. When set, data blocks should be cached in the l1 tier, up in - * LruBlockCache when using CombinedBlockCcahe. - */ - @Test - public void testCacheDataInL1() { - this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); - this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100); - CacheConfig cc = new CacheConfig(this.conf); - assertTrue(cc.getBlockCache() instanceof CombinedBlockCache); - CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache(); - // Add a data block. Should go into L2, into the Bucket Cache, not the LruBlockCache. - cacheDataBlock(cc, "1"); - LruBlockCache lrubc = (LruBlockCache)cbc.getBlockCaches()[0]; - assertDataBlockCount(lrubc, 0); - // Enable our test flag. 
- cc.setCacheDataInL1(true); - cacheDataBlock(cc, "2"); - assertDataBlockCount(lrubc, 1); - cc.setCacheDataInL1(false); - cacheDataBlock(cc, "3"); - assertDataBlockCount(lrubc, 1); } @Test @@ -416,16 +376,9 @@ c.set(CacheConfig.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096"); c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024); try { - CacheConfig.getL2(c); + CacheConfig.getBucketCache(c); fail("Should throw IllegalArgumentException when passing illegal value for bucket size"); } catch (IllegalArgumentException e) { } } - - private void assertDataBlockCount(final LruBlockCache bc, final int expected) { - Map<BlockType, Integer> blocks = bc.getBlockTypeCountsForTest(); - assertEquals(expected, blocks == null? 0: - blocks.get(BlockType.DATA) == null? 0: - blocks.get(BlockType.DATA).intValue()); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 47bf5a4..9535a46 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -230,7 +230,7 @@ public class TestCacheOnWrite { new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA), cowType.shouldBeCached(BlockType.LEAF_INDEX), cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, - false, false, false); + false, false); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 4eec0bf..af169f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -80,7 +80,7 @@ public class TestLruBlockCache { for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement()); boolean inMemory = Math.random() > 0.5; - cache.cacheBlock(block.cacheKey, block, inMemory, false); + cache.cacheBlock(block.cacheKey, block, inMemory); } cache.evictBlocksByHfileName(hfileName); } @@ -350,7 +350,7 @@ public class TestLruBlockCache { cache.getBlock(multiBlocks[i].cacheKey, true, false, true); // Add memory blocks as such - cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false); + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize(); } @@ -385,7 +385,7 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); // Insert another memory block - cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false); + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); // Three evictions, three evicted.
assertEquals(3, cache.getStats().getEvictionCount()); @@ -423,7 +423,7 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); // Cache a big memory block - cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true, false); + cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true); // Six evictions, twelve evicted (3 new) assertEquals(6, cache.getStats().getEvictionCount()); @@ -478,7 +478,7 @@ public class TestLruBlockCache { assertEquals(expectedCacheSize, cache.heapSize()); // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1 - cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true, false); + cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true); // Single eviction, one block evicted assertEquals(1, cache.getStats().getEvictionCount()); assertEquals(1, cache.getStats().getEvictedCount()); @@ -486,7 +486,7 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2 - cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true, false); + cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true); // Two evictions, two evicted. assertEquals(2, cache.getStats().getEvictionCount()); assertEquals(2, cache.getStats().getEvictedCount()); @@ -494,10 +494,10 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6 - cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true, false); - cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false); - cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true, false); - cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true, false); + cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true); + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); + cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true); + cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true); // Three evictions, three evicted. assertEquals(6, cache.getStats().getEvictionCount()); assertEquals(6, cache.getStats().getEvictedCount()); @@ -509,9 +509,9 @@ public class TestLruBlockCache { // 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted // si:mu:me = 0:0:9 - cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true, false); - cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true, false); - cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true, false); + cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true); + cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true); + cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true); // Three evictions, three evicted. assertEquals(9, cache.getStats().getEvictionCount()); assertEquals(9, cache.getStats().getEvictedCount()); @@ -522,7 +522,7 @@ public class TestLruBlockCache { // 5. Insert one memory block, the oldest memory evicted // si:mu:me = 0:0:9 - cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true, false); + cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true); // one eviction, one evicted. 
assertEquals(10, cache.getStats().getEvictionCount()); assertEquals(10, cache.getStats().getEvictedCount()); @@ -679,7 +679,7 @@ public class TestLruBlockCache { cache.getBlock(multiBlocks[i].cacheKey, true, false, true); // Add memory blocks as such - cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false); + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); } // Do not expect any evictions yet diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index 1c7f951..83edf8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -104,12 +104,11 @@ public class TestBucketCache { } @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { if (super.getBlock(cacheKey, true, false, true) != null) { throw new RuntimeException("Cached an already cached block"); } - super.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); + super.cacheBlock(cacheKey, buf, inMemory); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index 76b4faf..c71e0c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -645,8 +645,7 @@ public class TestHeapMemoryManager { } @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { }
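Note (not part of the patch): the sketch below illustrates what a call site looks like against the simplified BlockCache API after this change. The BlockCache, BlockCacheKey, Cacheable and CacheConfig signatures are taken from the diff above; the helper class BlockCacheUsageSketch and its cacheAndGet method are hypothetical names used only for illustration, and the Configuration is assumed to have the block cache enabled.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Cacheable;

public class BlockCacheUsageSketch {
  /** Caches one block and reads it back through the simplified three-argument API. */
  static Cacheable cacheAndGet(Configuration conf, String hfileName, long offset, Cacheable block) {
    // LruBlockCache alone, or CombinedBlockCache when a bucket/external cache is configured.
    BlockCache cache = CacheConfig.instantiateBlockCache(conf);
    if (cache == null) {
      return null; // block cache disabled (hfile.block.cache.size set to 0)
    }
    BlockCacheKey key = new BlockCacheKey(hfileName, offset);
    // The old signature was cacheBlock(key, block, inMemory, cacheDataInL1); with this patch the
    // L1/L2 placement is decided inside the cache implementation (meta blocks go to the LRU
    // cache, data blocks to the second-level cache).
    cache.cacheBlock(key, block, /* inMemory = */ false);
    Cacheable cached = cache.getBlock(key, /* caching = */ true, /* repeat = */ false,
        /* updateCacheMetrics = */ true);
    if (cached != null) {
      // returnBlock is now a default no-op on BlockCache, so simple implementations such as
      // MemcachedBlockCache no longer need to override it.
      cache.returnBlock(key, cached);
    }
    return cached;
  }
}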