From 53a498464875b215c5c9dd8ca80e0bb6c9a45c5b Mon Sep 17 00:00:00 2001 From: Ben Manes Date: Sun, 27 Mar 2016 04:24:42 -0700 Subject: [PATCH] W-TinyLFU based BlockCache --- .../java/org/apache/hadoop/hbase/HConstants.java | 5 + hbase-common/src/main/resources/hbase-default.xml | 5 + hbase-server/pom.xml | 4 + .../apache/hadoop/hbase/io/hfile/CacheConfig.java | 70 +++-- .../hbase/io/hfile/CaffeinatedBlockCache.java | 342 +++++++++++++++++++++ .../hadoop/hbase/io/hfile/CombinedBlockCache.java | 36 +-- .../hbase/io/hfile/FirstLevelBlockCache.java | 33 ++ .../io/hfile/InclusiveCombinedBlockCache.java | 6 +- .../hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- .../hadoop/hbase/io/hfile/TestCacheConfig.java | 4 +- pom.xml | 8 +- 11 files changed, 467 insertions(+), 48 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CaffeinatedBlockCache.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 0c6244f..2f083b0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -884,6 +884,11 @@ public final class HConstants { public static final float HFILE_BLOCK_CACHE_SIZE_DEFAULT = 0.4f; + public static final String HFILE_BLOCK_CACHE_POLICY_KEY = + "hfile.block.cache.policy"; + + public static final String HFILE_BLOCK_CACHE_POLICY_DEFAULT = "LRU"; + /* * Minimum percentage of free heap necessary for a successful cluster startup. 
*/ diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index e50e89e..aa48508 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -833,6 +833,11 @@ possible configurations would overwhelm and obscure the important. The default thread pool size if parallel-seeking feature enabled. + hfile.block.cache.policy + LRU + The eviction policy for the L1 block cache (LRU or TinyLFU). + + hfile.block.cache.size 0.4 Percentage of maximum heap (-Xmx setting) to allocate to block cache diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index d5f1e30..0f51509 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -435,6 +435,10 @@ true + com.github.ben-manes.caffeine + caffeine + + io.dropwizard.metrics metrics-core diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 2680c3d..915d5bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -23,13 +23,14 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.MemoryUsage; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ 
-97,7 +98,7 @@ public class CacheConfig { * is an in-memory map that needs to be persisted across restarts. Where to store this * in-memory state is what you supply here: e.g. /tmp/bucketcache.map. */ - public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = + public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; /** @@ -105,11 +106,11 @@ public class CacheConfig { * as indices and blooms are kept in the lru blockcache and the data blocks in the * bucket cache). */ - public static final String BUCKET_CACHE_COMBINED_KEY = + public static final String BUCKET_CACHE_COMBINED_KEY = "hbase.bucketcache.combinedcache.enabled"; public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads"; - public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = + public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength"; /** @@ -447,7 +448,9 @@ public class CacheConfig { * @return true if this {@link BlockCategory} should be compressed in blockcache, false otherwise */ public boolean shouldCacheCompressed(BlockCategory category) { - if (!isBlockCacheEnabled()) return false; + if (!isBlockCacheEnabled()) { + return false; + } switch (category) { case DATA: return this.cacheDataOnRead && this.cacheDataCompressed; @@ -534,7 +537,7 @@ public class CacheConfig { @VisibleForTesting static boolean blockCacheDisabled = false; - static long getLruCacheSize(final Configuration conf, final MemoryUsage mu) { + static long getFirstLevelCacheSize(final Configuration conf, final MemoryUsage mu) { float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); if (cachePercentage <= 0.0001f) { @@ -553,15 +556,27 @@ public class CacheConfig { /** * @param c Configuration to use. * @param mu JMX Memory Bean - * @return An L1 instance. Currently an instance of LruBlockCache. + * @return An L1 instance. 
*/ - private static LruBlockCache getL1(final Configuration c, final MemoryUsage mu) { - long lruCacheSize = getLruCacheSize(c, mu); - if (lruCacheSize < 0) return null; - int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); - LOG.info("Allocating LruBlockCache size=" + - StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); - return new LruBlockCache(lruCacheSize, blockSize, true, c); + private static FirstLevelBlockCache getL1(long cacheSize, Optional victimCache, + final Configuration c, final MemoryUsage mu) { + if (cacheSize < 0) { + return null; + } + String policy = c.get(HConstants.HFILE_BLOCK_CACHE_POLICY_KEY, + HConstants.HFILE_BLOCK_CACHE_POLICY_DEFAULT); + if (policy.equalsIgnoreCase("LRU")) { + int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); + LOG.info("Allocating LruBlockCache size=" + + StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); + LruBlockCache l1 = new LruBlockCache(cacheSize, blockSize, true, c); + l1.setVictimCache(victimCache.orElse(null)); + return l1; + } else if (policy.equalsIgnoreCase("TinyLFU")) { + return new CaffeinatedBlockCache(cacheSize, victimCache); + } else { + throw new IllegalStateException("Unknown policy: " + policy); + } } /** @@ -615,7 +630,9 @@ public class CacheConfig { private static BlockCache getBucketCache(Configuration c, MemoryUsage mu) { // Check for L2. ioengine name must be non-null. String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); - if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null; + if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) { + return null; + } int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); float bucketCachePercentage = c.getFloat(BUCKET_CACHE_SIZE_KEY, 0F); @@ -665,23 +682,31 @@ public class CacheConfig { * @return The block cache or null. 
*/ public static synchronized BlockCache instantiateBlockCache(Configuration conf) { - if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE; - if (blockCacheDisabled) return null; + if (GLOBAL_BLOCK_CACHE_INSTANCE != null) { + return GLOBAL_BLOCK_CACHE_INSTANCE; + } + if (blockCacheDisabled) { + return null; + } + // blockCacheDisabled is set as a side-effect of getFirstLevelCacheSize() + // so check it again after the call MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); - LruBlockCache l1 = getL1(conf, mu); - // blockCacheDisabled is set as a side-effect of getL1(), so check it again after the call. - if (blockCacheDisabled) return null; + long l1CacheSize = getFirstLevelCacheSize(conf, mu); + if (blockCacheDisabled) { + return null; + } BlockCache l2 = getL2(conf, mu); + FirstLevelBlockCache l1 = getL1(l1CacheSize, Optional.ofNullable(l2), conf, mu); if (l2 == null) { GLOBAL_BLOCK_CACHE_INSTANCE = l1; } else { boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); - boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY, + boolean combinedWithL1 = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY, DEFAULT_BUCKET_CACHE_COMBINED); if (useExternal) { GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2); } else { - if (combinedWithLru) { + if (combinedWithL1) { GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2); } else { // L1 and L2 are not 'combined'. 
They are connected via the LruBlockCache victimhandler @@ -691,7 +716,6 @@ public class CacheConfig { GLOBAL_BLOCK_CACHE_INSTANCE = l1; } } - l1.setVictimCache(l2); } return GLOBAL_BLOCK_CACHE_INSTANCE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CaffeinatedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CaffeinatedBlockCache.java new file mode 100644 index 0000000..a53bccb --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CaffeinatedBlockCache.java @@ -0,0 +1,342 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import java.util.Comparator; +import java.util.Iterator; +import java.util.Optional; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.util.StringUtils; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.Policy.Eviction; +import com.github.benmanes.caffeine.cache.RemovalCause; +import com.github.benmanes.caffeine.cache.RemovalListener; +import com.google.common.base.Objects; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * A block cache that is memory-aware using {@link HeapSize}, memory bounded using the W-TinyLFU + * eviction algorithm, and concurrent. This implementation delegates to a Caffeine cache to provide + * O(1) read and write operations. + *
    + *
+ * <ul>
+ *   <li>W-TinyLFU: http://arxiv.org/pdf/1512.00727.pdf</li>
+ *   <li>Caffeine: https://github.com/ben-manes/caffeine</li>
+ *   <li>Cache design: http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html</li>
+ * </ul>
+ * + * @author ben.manes@gmail.com (Ben Manes) + */ +@InterfaceAudience.Private +public final class CaffeinatedBlockCache implements FirstLevelBlockCache { + private static final Log LOG = LogFactory.getLog(CaffeinatedBlockCache.class); + private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; + private static final int STAT_THREAD_PERIOD_SECONDS = 5 * 60; + + private final Eviction policy; + private final ScheduledExecutorService statsThreadPool; + private final Cache cache; + private final BlockCache victimCache; + private final CacheStats stats; + + /** + * Creates a block cache. + * + * @param maximumSizeInBytes maximum size of this cache, in bytes + * @param victimCache the second level cache + */ + public CaffeinatedBlockCache(long maximumSizeInBytes, Optional victimCache) { + this.cache = Caffeine.newBuilder() + .maximumWeight(maximumSizeInBytes) + .removalListener(new EvictionListener()) + .weigher((BlockCacheKey key, Cacheable value) -> value.getSerializedLength()) + .initialCapacity((int) Math.ceil((1.2 * maximumSizeInBytes) / DEFAULT_MAX_BLOCK_SIZE)) + .build(); + this.victimCache = victimCache.orElse(null); + this.policy = cache.policy().eviction().get(); + this.stats = new CacheStats(getClass().getSimpleName()); + + statsThreadPool = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder() + .setNameFormat("CaffeinatedBlockCacheStatsExecutor").setDaemon(true).build()); + statsThreadPool.scheduleAtFixedRate(this::logStats, + STAT_THREAD_PERIOD_SECONDS, STAT_THREAD_PERIOD_SECONDS, TimeUnit.SECONDS); + } + + @Override + public long size() { + return policy.getMaximum(); + } + + @Override + public long getFreeSize() { + return size() - getCurrentSize(); + } + + @Override + public long getCurrentSize() { + return policy.weightedSize().getAsLong(); + } + + @Override + public long getBlockCount() { + return cache.estimatedSize(); + } + + @Override + public long heapSize() { + return getCurrentSize(); + } + + @Override + public 
void setMaxSize(long size) { + policy.setMaximum(size); + } + + @Override + public boolean containsBlock(BlockCacheKey cacheKey) { + return cache.asMap().containsKey(cacheKey); + } + + @Override + public Cacheable getBlock(BlockCacheKey cacheKey, + boolean caching, boolean repeat, boolean updateCacheMetrics) { + Cacheable value = cache.getIfPresent(cacheKey); + if (value == null) { + if (repeat) { + return null; + } + if (updateCacheMetrics) { + stats.miss(caching, cacheKey.isPrimary()); + } + if (victimCache != null) { + value = victimCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + if ((value != null) && caching) { + cacheBlock(cacheKey, value); + } + } + } else if (updateCacheMetrics) { + stats.hit(caching, cacheKey.isPrimary()); + } + return value; + } + + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable value, + boolean inMemory, boolean cacheDataInL1) { + cacheBlock(cacheKey, value); + } + + @Override + public void cacheBlock(BlockCacheKey key, Cacheable value) { + if ((value.heapSize() > DEFAULT_MAX_BLOCK_SIZE)) { + // If there are a lot of blocks that are too big this can make the logs too noisy (2% logged) + if (stats.failInsert() % 50 == 0) { + LOG.warn(String.format( + "Trying to cache too large a block %s @ %,d is %,d which is larger than %,d", + key.getHfileName(), key.getOffset(), value.heapSize(), DEFAULT_MAX_BLOCK_SIZE)); + } + } else { + cache.put(key, value); + } + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + Cacheable value = cache.asMap().remove(cacheKey); + return (value != null); + } + + @Override + public int evictBlocksByHfileName(String hfileName) { + int evicted = 0; + for (BlockCacheKey key : cache.asMap().keySet()) { + if (key.getHfileName().equals(hfileName) && evictBlock(key)) { + evicted++; + } + } + if (victimCache != null) { + evicted += victimCache.evictBlocksByHfileName(hfileName); + } + return evicted; + } + + @Override + public CacheStats getStats() { + return stats; 
+ } + + @Override + public void shutdown() { + if (victimCache != null) { + victimCache.shutdown(); + } + statsThreadPool.shutdown(); + } + + @Override + public BlockCache[] getBlockCaches() { + return null; + } + + @Override + public Iterator iterator() { + long now = System.nanoTime(); + return cache.asMap().entrySet().stream() + .map(entry -> (CachedBlock) new CachedBlockView(entry.getKey(), entry.getValue(), now)) + .iterator(); + } + + @Override + public void returnBlock(BlockCacheKey cacheKey, Cacheable block) {} + + private void logStats() { + LOG.info( + "totalSize=" + StringUtils.byteDesc(heapSize()) + ", " + + "freeSize=" + StringUtils.byteDesc(getFreeSize()) + ", " + + "max=" + StringUtils.byteDesc(size()) + ", " + + "blockCount=" + getBlockCount() + ", " + + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + + "hitRatio=" + (stats.getHitCount() == 0 ? + "0," : StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ") + + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + + "cachingHits=" + stats.getHitCachingCount() + ", " + + "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? + "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + + "evicted=" + stats.getEvictedCount()); + } + + @Override + public String toString() { + return Objects.toStringHelper(this) + .add("blockCount", getBlockCount()) + .add("currentSize", getCurrentSize()) + .add("freeSize", getFreeSize()) + .add("maxSize", size()) + .add("heapSize", heapSize()) + .add("victimCache", (victimCache != null)) + .toString(); + } + + /** A removal listener to asynchronously record evictions and populate the victim cache. 
*/ + private final class EvictionListener implements RemovalListener { + @Override public void onRemoval(BlockCacheKey key, Cacheable value, RemovalCause cause) { + if (cause.wasEvicted()) { + stats.evict(); + } else { + // FIXME: Currently does not capture the insertion time + stats.evicted(/* cachedTime */ 0L, key.isPrimary()); + return; + } + + if (victimCache == null) { + return; + } else if (victimCache instanceof BucketCache) { + BucketCache victimBucketCache = (BucketCache) victimCache; + victimBucketCache.cacheBlockWithWait(key, value, /* inMemory */ true, /* wait */ true); + } else { + victimCache.cacheBlock(key, value); + } + } + } + + private static final class CachedBlockView implements CachedBlock { + private static final Comparator COMPARATOR = Comparator + .comparing(CachedBlock::getFilename) + .thenComparing(CachedBlock::getOffset) + .thenComparing(CachedBlock::getCachedTime); + + private final BlockCacheKey key; + private final Cacheable value; + private final long now; + + public CachedBlockView(BlockCacheKey key, Cacheable value, long now) { + this.now = now; + this.key = key; + this.value = value; + } + + @Override + public BlockPriority getBlockPriority() { + // This does not appear to be used in any meaningful way and is irrelevant to this cache + return BlockPriority.MEMORY; + } + + @Override + public BlockType getBlockType() { + return value.getBlockType(); + } + + @Override + public long getOffset() { + return key.getOffset(); + } + + @Override + public long getSize() { + return value.heapSize(); + } + + @Override + public long getCachedTime() { + // This does not appear to be used in any meaningful way, so not captured + return 0L; + } + + @Override + public String getFilename() { + return key.getHfileName(); + } + + @Override + public int compareTo(CachedBlock other) { + return COMPARATOR.compare(this, other); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } else if (!(obj instanceof 
CachedBlock)) { + return false; + } + CachedBlock other = (CachedBlock) obj; + return compareTo(other) == 0; + } + + @Override + public int hashCode() { + return key.hashCode(); + } + + @Override + public String toString() { + return BlockCacheUtil.toString(this, now); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 666b357..021a0d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -40,14 +40,14 @@ import com.google.common.annotations.VisibleForTesting; */ @InterfaceAudience.Private public class CombinedBlockCache implements ResizableBlockCache, HeapSize { - protected final LruBlockCache lruCache; + protected final FirstLevelBlockCache l1Cache; protected final BlockCache l2Cache; protected final CombinedCacheStats combinedCacheStats; - public CombinedBlockCache(LruBlockCache lruCache, BlockCache l2Cache) { - this.lruCache = lruCache; + public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) { + this.l1Cache = l1Cache; this.l2Cache = l2Cache; - this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(), + this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(), l2Cache.getStats()); } @@ -57,7 +57,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { if (l2Cache instanceof HeapSize) { l2size = ((HeapSize) l2Cache).heapSize(); } - return lruCache.heapSize() + l2size; + return l1Cache.heapSize() + l2size; } @Override @@ -65,7 +65,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { final boolean cacheDataInL1) { boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA; if (metaBlock || cacheDataInL1) { - lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); + 
l1Cache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); } else { l2Cache.cacheBlock(cacheKey, buf, inMemory, false); } @@ -81,19 +81,19 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { boolean repeat, boolean updateCacheMetrics) { // TODO: is there a hole here, or just awkwardness since in the lruCache getBlock // we end up calling l2Cache.getBlock. - return lruCache.containsBlock(cacheKey)? - lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): + return l1Cache.containsBlock(cacheKey)? + l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override public boolean evictBlock(BlockCacheKey cacheKey) { - return lruCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey); + return l1Cache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey); } @Override public int evictBlocksByHfileName(String hfileName) { - return lruCache.evictBlocksByHfileName(hfileName) + return l1Cache.evictBlocksByHfileName(hfileName) + l2Cache.evictBlocksByHfileName(hfileName); } @@ -104,28 +104,28 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public void shutdown() { - lruCache.shutdown(); + l1Cache.shutdown(); l2Cache.shutdown(); } @Override public long size() { - return lruCache.size() + l2Cache.size(); + return l1Cache.size() + l2Cache.size(); } @Override public long getFreeSize() { - return lruCache.getFreeSize() + l2Cache.getFreeSize(); + return l1Cache.getFreeSize() + l2Cache.getFreeSize(); } @Override public long getCurrentSize() { - return lruCache.getCurrentSize() + l2Cache.getCurrentSize(); + return l1Cache.getCurrentSize() + l2Cache.getCurrentSize(); } @Override public long getBlockCount() { - return lruCache.getBlockCount() + l2Cache.getBlockCount(); + return l1Cache.getBlockCount() + l2Cache.getBlockCount(); } public static class CombinedCacheStats extends CacheStats { @@ -242,18 +242,18 @@ public class 
CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public BlockCache[] getBlockCaches() { - return new BlockCache [] {this.lruCache, this.l2Cache}; + return new BlockCache [] {this.l1Cache, this.l2Cache}; } @Override public void setMaxSize(long size) { - this.lruCache.setMaxSize(size); + this.l1Cache.setMaxSize(size); } @Override public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { // A noop - this.lruCache.returnBlock(cacheKey, block); + this.l1Cache.returnBlock(cacheKey, block); this.l2Cache.returnBlock(cacheKey, block); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java new file mode 100644 index 0000000..ff0ff10 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.HeapSize; + +@InterfaceAudience.Private +public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize { + + /** + * Whether the cache contains the block with specified cacheKey + * + * @param cacheKey + * @return true if it contains the block + */ + boolean containsBlock(BlockCacheKey cacheKey); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java index 667e7b4..160714b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class InclusiveCombinedBlockCache extends CombinedBlockCache implements BlockCache { - public InclusiveCombinedBlockCache(LruBlockCache l1, BlockCache l2) { + public InclusiveCombinedBlockCache(FirstLevelBlockCache l1, BlockCache l2) { super(l1,l2); } @@ -34,7 +34,7 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache implements B // On all external cache set ups the lru should have the l2 cache set as the victimHandler // Because of that all requests that miss inside of the lru block cache will be // tried in the l2 block cache. - return lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + return l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } /** @@ -50,7 +50,7 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache implements B final boolean cacheDataInL1) { // This is the inclusive part of the combined block cache. // Every block is placed into both block caches. 
- lruCache.cacheBlock(cacheKey, buf, inMemory, true); + l1Cache.cacheBlock(cacheKey, buf, inMemory, true); // This assumes that insertion into the L2 block cache is either async or very fast. l2Cache.cacheBlock(cacheKey, buf, inMemory, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 68ce16c..0ed1610 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -95,7 +95,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; */ @InterfaceAudience.Private @JsonIgnoreProperties({"encodingCountsForTest"}) -public class LruBlockCache implements ResizableBlockCache, HeapSize { +public class LruBlockCache implements FirstLevelBlockCache { private static final Log LOG = LogFactory.getLog(LruBlockCache.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index d8a2589..2753c25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -328,7 +328,7 @@ public class TestCacheConfig { BlockCache [] bcs = cbc.getBlockCaches(); assertTrue(bcs[0] instanceof LruBlockCache); LruBlockCache lbc = (LruBlockCache)bcs[0]; - assertEquals(CacheConfig.getLruCacheSize(this.conf, + assertEquals(CacheConfig.getFirstLevelCacheSize(this.conf, ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()), lbc.getMaxSize()); assertTrue(bcs[1] instanceof BucketCache); BucketCache bc = (BucketCache)bcs[1]; @@ -347,7 +347,7 @@ public class TestCacheConfig { // from L1 happens, it does not fail because L2 can't take the eviction because block too big. 
this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f); MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); - long lruExpectedSize = CacheConfig.getLruCacheSize(this.conf, mu); + long lruExpectedSize = CacheConfig.getFirstLevelCacheSize(this.conf, mu); final int bcSize = 100; long bcExpectedSize = 100 * 1024 * 1024; // MB. assertTrue(lruExpectedSize < bcExpectedSize); diff --git a/pom.xml b/pom.xml index 450275c..0645f4c 100644 --- a/pom.xml +++ b/pom.xml @@ -1177,7 +1177,7 @@ yyyy-MM-dd'T'HH:mm ${maven.build.timestamp} - 1.7 + 1.8 3.0.4 ${compileSource} @@ -1197,6 +1197,7 @@ 3.2.2 3.1 3.1.2 + 2.2.3 12.0.1 1.3.9 1.9.13 @@ -1463,6 +1464,11 @@ ${slf4j.version} + com.github.ben-manes.caffeine + caffeine + ${caffeine.version} + + io.dropwizard.metrics metrics-core ${metrics-core.version} -- 2.5.3