From ec5c97f17711ca4c5d926e505f92da4adaeb7bae Mon Sep 17 00:00:00 2001 From: Guanghao Zhang Date: Thu, 29 Nov 2018 10:30:45 +0800 Subject: [PATCH] HBASE-21514 Refactor CacheConfig --- .../hbase/tmpl/regionserver/RSStatusTmpl.jamon | 5 +- .../hadoop/hbase/io/hfile/BlockCacheFactory.java | 236 ++++++++++ .../apache/hadoop/hbase/io/hfile/CacheConfig.java | 507 ++++----------------- .../hadoop/hbase/io/hfile/CombinedBlockCache.java | 8 + .../hadoop/hbase/io/hfile/HFileReaderImpl.java | 142 +++--- .../hadoop/hbase/io/hfile/HFileWriterImpl.java | 4 +- .../hbase/io/hfile/bucket/BucketAllocator.java | 4 +- .../assignment/MergeTableRegionsProcedure.java | 11 +- .../assignment/SplitTableRegionProcedure.java | 13 +- .../apache/hadoop/hbase/mob/MobCacheConfig.java | 31 +- .../org/apache/hadoop/hbase/mob/MobFileCache.java | 11 +- .../hadoop/hbase/regionserver/HMobStore.java | 13 +- .../hadoop/hbase/regionserver/HRegionServer.java | 63 ++- .../apache/hadoop/hbase/regionserver/HStore.java | 10 +- .../hbase/regionserver/HeapMemoryManager.java | 16 +- .../MetricsRegionServerWrapperImpl.java | 38 +- .../hadoop/hbase/regionserver/RSRpcServices.java | 2 +- .../hbase/regionserver/RegionServerServices.java | 14 +- .../hadoop/hbase/MockRegionServerServices.java | 12 + .../hbase/io/encoding/TestEncodedSeekers.java | 9 +- .../hbase/io/hfile/TestBlockCacheReporting.java | 47 +- .../hadoop/hbase/io/hfile/TestCacheConfig.java | 53 +-- .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 20 +- .../io/hfile/TestForceCacheImportantBlocks.java | 24 +- .../apache/hadoop/hbase/io/hfile/TestHFile.java | 9 +- .../hadoop/hbase/io/hfile/TestHFileBlock.java | 2 +- .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java | 6 +- .../io/hfile/TestLazyDataBlockDecompression.java | 12 +- .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 20 +- .../hbase/io/hfile/TestScannerFromBucketCache.java | 3 +- .../hfile/TestScannerSelectionUsingKeyRange.java | 5 +- .../io/hfile/TestScannerSelectionUsingTTL.java | 32 +- .../hadoop/hbase/master/MockRegionServer.java | 12 + .../hbase/master/TestMasterNotCarryTable.java | 2 +- .../apache/hadoop/hbase/mob/TestMobFileCache.java | 8 +- .../hadoop/hbase/regionserver/TestBlocksRead.java | 36 +- .../hbase/regionserver/TestBlocksScanned.java | 7 +- .../regionserver/TestCacheOnWriteInSchema.java | 3 +- .../regionserver/TestClearRegionBlockCache.java | 46 +- .../regionserver/TestCompoundBloomFilter.java | 9 - .../hadoop/hbase/regionserver/TestHMobStore.java | 84 ++-- .../hadoop/hbase/regionserver/TestHStoreFile.java | 14 +- .../hbase/regionserver/TestMobStoreCompaction.java | 15 +- .../hbase/regionserver/TestRSStatusServlet.java | 7 +- .../hbase/regionserver/TestRecoveredEdits.java | 2 - .../regionserver/TestRowPrefixBloomFilter.java | 2 +- .../regionserver/TestSecureBulkLoadManager.java | 2 +- 47 files changed, 772 insertions(+), 859 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon index 646d835..dde4ee2 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon @@ -29,13 +29,14 @@ org.apache.hadoop.hbase.regionserver.HRegionServer; org.apache.hadoop.hbase.client.RegionInfo; org.apache.hadoop.hbase.ServerName; 
org.apache.hadoop.hbase.HBaseConfiguration; +org.apache.hadoop.hbase.io.hfile.CacheConfig; org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo; org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; <%doc>If json AND bcn is NOT an empty string presume it a block cache view request. <%if format.equals("json") && bcn != null && bcn.length() > 0 %> - <& BlockCacheViewTmpl; conf = regionServer.getConfiguration(); cacheConfig = regionServer.getCacheConfig(); bcn = bcn; bcv = bcv; &> + <& BlockCacheViewTmpl; conf = regionServer.getConfiguration(); cacheConfig = new CacheConfig(regionServer.getConfiguration(), regionServer.getBlockCache()); bcn = bcn; bcv = bcv; &> <%java return; %> <%elseif format.equals("json") %> <& ../common/TaskMonitorTmpl; filter = filter; format = "json" &> @@ -109,7 +110,7 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;

Block Cache

- <& BlockCacheTmpl; cacheConfig = regionServer.getCacheConfig(); config = regionServer.getConfiguration() &>
+ <& BlockCacheTmpl; cacheConfig = new CacheConfig(regionServer.getConfiguration(), regionServer.getBlockCache()); config = regionServer.getConfiguration() &>
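
The RSStatusTmpl change above is the user-visible edge of the refactor: the template no longer asks the region server for a ready-made CacheConfig, but assembles one from the server's Configuration and its BlockCache. A minimal sketch of that construction, assuming only the CacheConfig(Configuration, BlockCache) constructor and the regionServer.getBlockCache() accessor introduced elsewhere in this patch (the helper method name is illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.HRegionServer;

    // Illustrative helper: builds the per-request CacheConfig the status page now uses.
    static CacheConfig statusPageCacheConfig(HRegionServer regionServer) {
      Configuration conf = regionServer.getConfiguration();
      BlockCache blockCache = regionServer.getBlockCache(); // accessor added by this patch
      // CacheConfig(Configuration, BlockCache) applies the global (no-family) cache flags
      // from conf and records the supplied BlockCache reference; no static lookup involved.
      return new CacheConfig(conf, blockCache);
    }
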
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java new file mode 100644 index 0000000..e8bee22 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java @@ -0,0 +1,236 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; +import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.io.util.MemorySizeUtil; +import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + +@InterfaceAudience.Private +public class BlockCacheFactory { + + private static final Logger LOG = LoggerFactory.getLogger(BlockCacheFactory.class.getName()); + + /** + * Configuration keys for Bucket cache + */ + + /** + * If the chosen ioengine can persist its state across restarts, the path to the file to persist + * to. This file is NOT the data file. It is a file into which we will serialize the map of + * what is in the data file. For example, if you pass the following argument as + * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), + * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file + * /tmp/bucketcache.data but the metadata on where the data is in the supplied file + * is an in-memory map that needs to be persisted across restarts. Where to store this + * in-memory state is what you supply here: e.g. /tmp/bucketcache.map. + */ + public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; + + public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads"; + + public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength"; + + /** + * A comma-delimited array of values for use as bucket sizes. + */ + public static final String BUCKET_CACHE_BUCKETS_KEY = "hbase.bucketcache.bucket.sizes"; + + /** + * Defaults for Bucket cache + */ + public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3; + public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64; + + /** + * The target block size used by blockcache instances. Defaults to + * {@link HConstants#DEFAULT_BLOCKSIZE}. 
+ * TODO: this config point is completely wrong, as it's used to determine the + * target block size of BlockCache instances. Rename. + */ + public static final String BLOCKCACHE_BLOCKSIZE_KEY = "hbase.offheapcache.minblocksize"; + + private static final String EXTERNAL_BLOCKCACHE_KEY = "hbase.blockcache.use.external"; + private static final boolean EXTERNAL_BLOCKCACHE_DEFAULT = false; + + private static final String EXTERNAL_BLOCKCACHE_CLASS_KEY = "hbase.blockcache.external.class"; + + /** + * Only used for unit test which doesn't start region server. + * Clear this if in tests you'd make more than one block cache instance. + */ + @VisibleForTesting + @edu.umd.cs.findbugs.annotations.SuppressWarnings("MS_SHOULD_BE_FINAL") + public static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE; + + private BlockCacheFactory() { + } + + public static BlockCache createBlockCache(Configuration conf) { + LruBlockCache onHeapCache = createOnHeapCache(conf); + if (onHeapCache == null) { + return null; + } + boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); + if (useExternal) { + BlockCache l2CacheInstance = createExternalBlockcache(conf); + return l2CacheInstance == null ? + onHeapCache : + new InclusiveCombinedBlockCache(onHeapCache, l2CacheInstance); + } else { + // otherwise use the bucket cache. + BucketCache bucketCache = createBucketCache(conf); + if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) { + // Non combined mode is off from 2.0 + LOG.warn( + "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); + } + return bucketCache == null ? onHeapCache : new CombinedBlockCache(onHeapCache, bucketCache); + } + } + + private static LruBlockCache createOnHeapCache(final Configuration c) { + final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c); + if (cacheSize < 0) { + return null; + } + int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); + LOG.info( + "Allocating onheap LruBlockCache size=" + StringUtils.byteDesc(cacheSize) + ", blockSize=" + + StringUtils.byteDesc(blockSize)); + return new LruBlockCache(cacheSize, blockSize, true, c); + } + + /** + * Enum of all built in external block caches. + * This is used for config. + */ + private static enum ExternalBlockCaches { + memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"); + // TODO(eclark): Consider more. Redis, etc. + Class clazz; + ExternalBlockCaches(String clazzName) { + try { + clazz = (Class) Class.forName(clazzName); + } catch (ClassNotFoundException cnef) { + clazz = null; + } + } + ExternalBlockCaches(Class clazz) { + this.clazz = clazz; + } + } + + private static BlockCache createExternalBlockcache(Configuration c) { + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to use External l2 cache"); + } + Class klass = null; + + // Get the class, from the config. s + try { + klass = ExternalBlockCaches + .valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; + } catch (IllegalArgumentException exception) { + try { + klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName( + "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); + } catch (ClassNotFoundException e) { + return null; + } + } + + // Now try and create an instance of the block cache. 
+ try { + LOG.info("Creating external block cache of type: " + klass); + return (BlockCache) ReflectionUtils.newInstance(klass, c); + } catch (Exception e) { + LOG.warn("Error creating external block cache", e); + } + return null; + + } + + private static BucketCache createBucketCache(Configuration c) { + // Check for L2. ioengine name must be non-null. + String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); + if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) { + return null; + } + + int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); + final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c); + if (bucketCacheSize <= 0) { + throw new IllegalStateException("bucketCacheSize <= 0; Check " + + BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size"); + } + if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) { + LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer " + + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note"); + } + int writerThreads = c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, + DEFAULT_BUCKET_CACHE_WRITER_THREADS); + int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, + DEFAULT_BUCKET_CACHE_WRITER_QUEUE); + String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY); + String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY); + int [] bucketSizes = null; + if (configuredBucketSizes != null) { + bucketSizes = new int[configuredBucketSizes.length]; + for (int i = 0; i < configuredBucketSizes.length; i++) { + int bucketSize = Integer.parseInt(configuredBucketSizes[i].trim()); + if (bucketSize % 256 != 0) { + // We need all the bucket sizes to be multiples of 256. Having all the configured bucket + // sizes to be multiples of 256 will ensure that the block offsets within buckets, + // that are calculated, will also be multiples of 256. + // See BucketEntry where offset to each block is represented using 5 bytes (instead of 8 + // bytes long). We would like to save heap overhead as less as possible. + throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '" + + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes to be multiples of 256"); + } + bucketSizes[i] = bucketSize; + } + } + BucketCache bucketCache = null; + try { + int ioErrorsTolerationDuration = c.getInt( + "hbase.bucketcache.ioengine.errors.tolerated.duration", + BucketCache.DEFAULT_ERROR_TOLERATION_DURATION); + // Bucket cache logs its stats on creation internal to the constructor. 
+ bucketCache = new BucketCache(bucketCacheIOEngineName, + bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath, + ioErrorsTolerationDuration, c); + } catch (IOException ioex) { + LOG.error("Can't instantiate bucket cache", ioex); throw new RuntimeException(ioex); + } + return bucketCache; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index a022552..fc820bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -17,26 +17,15 @@ */ package org.apache.hadoop.hbase.io.hfile; -import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; -import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; - -import java.io.IOException; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; -import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; -import org.apache.hadoop.hbase.io.util.MemorySizeUtil; -import org.apache.hadoop.hbase.util.ReflectionUtils; -import org.apache.hadoop.util.StringUtils; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Stores all of the cache objects and configuration for a single HFile. */ @@ -44,7 +33,6 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti public class CacheConfig { private static final Logger LOG = LoggerFactory.getLogger(CacheConfig.class.getName()); - /** * Disabled cache configuration */ @@ -60,109 +48,38 @@ public class CacheConfig { * Configuration key to cache data blocks on write. There are separate * switches for bloom blocks and non-root index blocks. */ - public static final String CACHE_BLOCKS_ON_WRITE_KEY = - "hbase.rs.cacheblocksonwrite"; + public static final String CACHE_BLOCKS_ON_WRITE_KEY = "hbase.rs.cacheblocksonwrite"; /** * Configuration key to cache leaf and intermediate-level index blocks on * write. */ - public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY = - "hfile.block.index.cacheonwrite"; + public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY = "hfile.block.index.cacheonwrite"; /** * Configuration key to cache compound bloom filter blocks on write. */ - public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY = - "hfile.block.bloom.cacheonwrite"; + public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY = "hfile.block.bloom.cacheonwrite"; /** * Configuration key to cache data blocks in compressed and/or encrypted format. */ - public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY = - "hbase.block.data.cachecompressed"; + public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY = "hbase.block.data.cachecompressed"; /** * Configuration key to evict all blocks of a given file from the block cache * when the file is closed. */ - public static final String EVICT_BLOCKS_ON_CLOSE_KEY = - "hbase.rs.evictblocksonclose"; - - /** - * Configuration keys for Bucket cache - */ - - /** - * If the chosen ioengine can persist its state across restarts, the path to the file to persist - * to. This file is NOT the data file. 
It is a file into which we will serialize the map of - * what is in the data file. For example, if you pass the following argument as - * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), - * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file - * /tmp/bucketcache.data but the metadata on where the data is in the supplied file - * is an in-memory map that needs to be persisted across restarts. Where to store this - * in-memory state is what you supply here: e.g. /tmp/bucketcache.map. - */ - public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = - "hbase.bucketcache.persistent.path"; - - public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads"; - public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = - "hbase.bucketcache.writer.queuelength"; - - /** - * A comma-delimited array of values for use as bucket sizes. - */ - public static final String BUCKET_CACHE_BUCKETS_KEY = "hbase.bucketcache.bucket.sizes"; + public static final String EVICT_BLOCKS_ON_CLOSE_KEY = "hbase.rs.evictblocksonclose"; /** - * Defaults for Bucket cache - */ - public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3; - public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64; - - /** * Configuration key to prefetch all blocks of a given file into the block cache * when the file is opened. */ - public static final String PREFETCH_BLOCKS_ON_OPEN_KEY = - "hbase.rs.prefetchblocksonopen"; + public static final String PREFETCH_BLOCKS_ON_OPEN_KEY = "hbase.rs.prefetchblocksonopen"; - /** - * The target block size used by blockcache instances. Defaults to - * {@link HConstants#DEFAULT_BLOCKSIZE}. - * TODO: this config point is completely wrong, as it's used to determine the - * target block size of BlockCache instances. Rename. - */ - public static final String BLOCKCACHE_BLOCKSIZE_KEY = "hbase.offheapcache.minblocksize"; - - private static final String EXTERNAL_BLOCKCACHE_KEY = "hbase.blockcache.use.external"; - private static final boolean EXTERNAL_BLOCKCACHE_DEFAULT = false; - - private static final String EXTERNAL_BLOCKCACHE_CLASS_KEY = "hbase.blockcache.external.class"; - private static final String DROP_BEHIND_CACHE_COMPACTION_KEY = + public static final String DROP_BEHIND_CACHE_COMPACTION_KEY = "hbase.hfile.drop.behind.compaction"; - private static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true; - - /** - * Enum of all built in external block caches. - * This is used for config. - */ - private static enum ExternalBlockCaches { - memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"); - // TODO(eclark): Consider more. Redis, etc. 
- Class clazz; - ExternalBlockCaches(String clazzName) { - try { - clazz = (Class) Class.forName(clazzName); - } catch (ClassNotFoundException cnef) { - clazz = null; - } - } - ExternalBlockCaches(Class clazz) { - this.clazz = clazz; - } - } // Defaults public static final boolean DEFAULT_CACHE_DATA_ON_READ = true; @@ -173,9 +90,7 @@ public class CacheConfig { public static final boolean DEFAULT_EVICT_ON_CLOSE = false; public static final boolean DEFAULT_CACHE_DATA_COMPRESSED = false; public static final boolean DEFAULT_PREFETCH_ON_OPEN = false; - - /** Local reference to the block cache, null if completely disabled */ - private final BlockCache blockCache; + public static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true; /** * Whether blocks should be cached on read (default is on if there is a @@ -186,111 +101,79 @@ public class CacheConfig { private boolean cacheDataOnRead; /** Whether blocks should be flagged as in-memory when being cached */ - private final boolean inMemory; + private boolean inMemory; /** Whether data blocks should be cached when new files are written */ private boolean cacheDataOnWrite; /** Whether index blocks should be cached when new files are written */ - private final boolean cacheIndexesOnWrite; + private boolean cacheIndexesOnWrite; /** Whether compound bloom filter blocks should be cached on write */ - private final boolean cacheBloomsOnWrite; + private boolean cacheBloomsOnWrite; /** Whether blocks of a file should be evicted when the file is closed */ private boolean evictOnClose; /** Whether data blocks should be stored in compressed and/or encrypted form in the cache */ - private final boolean cacheDataCompressed; + private boolean cacheDataCompressed; /** Whether data blocks should be prefetched into the cache */ - private final boolean prefetchOnOpen; + private boolean prefetchOnOpen; - private final boolean dropBehindCompaction; + private boolean dropBehindCompaction; + + // Local reference to the block cache + private final BlockCache blockCache; /** * Create a cache configuration using the specified configuration object and - * family descriptor. + * defaults for family level settings. Only use if no column family context. * @param conf hbase configuration - * @param family column family configuration */ - public CacheConfig(Configuration conf, ColumnFamilyDescriptor family) { - this(GLOBAL_BLOCK_CACHE_INSTANCE, - conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) - && family.isBlockCacheEnabled(), - family.isInMemory(), - // For the following flags we enable them regardless of per-schema settings - // if they are enabled in the global configuration. 
- conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, - DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(), - conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, - DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(), - conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, - DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(), - conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, - DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(), - conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), - conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, - DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(), - conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT) - ); - LOG.info("Created cacheConfig for " + family.getNameAsString() + ": " + this); + public CacheConfig(Configuration conf) { + this(conf, null); + } + + public CacheConfig(Configuration conf, BlockCache blockCache) { + this(conf, null, blockCache); } /** * Create a cache configuration using the specified configuration object and - * defaults for family level settings. Only use if no column family context. Prefer - * {@link CacheConfig#CacheConfig(Configuration, ColumnFamilyDescriptor)} - * @see #CacheConfig(Configuration, ColumnFamilyDescriptor) + * family descriptor. * @param conf hbase configuration + * @param family column family configuration */ - public CacheConfig(Configuration conf) { - this(GLOBAL_BLOCK_CACHE_INSTANCE, - conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ), - DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set - // strictly from conf - conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE), - conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE), - conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE), - conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE), - conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), - conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN), - conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)); - LOG.info("Created cacheConfig: " + this); - } + public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache blockCache) { + this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ); + this.inMemory = DEFAULT_IN_MEMORY; + this.cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE); + this.cacheIndexesOnWrite = + conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE); + this.cacheBloomsOnWrite = + conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE); + this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE); + this.cacheDataCompressed = + conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); + this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN); + this.dropBehindCompaction = + conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); + + if (family != null) { + this.cacheDataOnRead = this.cacheDataOnRead && family.isBlockCacheEnabled(); + this.inMemory = family.isInMemory(); + // For the following flags we enable them regardless of per-schema settings + // if they are enabled in the global configuration. 
+ this.cacheDataOnWrite |= family.isCacheDataOnWrite(); + this.cacheIndexesOnWrite |= family.isCacheIndexesOnWrite(); + this.cacheBloomsOnWrite |= family.isCacheBloomsOnWrite(); + this.evictOnClose |= family.isEvictBlocksOnClose(); + this.prefetchOnOpen |= family.isPrefetchBlocksOnOpen(); + } - /** - * Create a block cache configuration with the specified cache and configuration parameters. - * @param blockCache reference to block cache, null if completely disabled - * @param cacheDataOnRead whether DATA blocks should be cached on read (we always cache INDEX - * blocks and BLOOM blocks; this cannot be disabled). - * @param inMemory whether blocks should be flagged as in-memory - * @param cacheDataOnWrite whether data blocks should be cached on write - * @param cacheIndexesOnWrite whether index blocks should be cached on write - * @param cacheBloomsOnWrite whether blooms should be cached on write - * @param evictOnClose whether blocks should be evicted when HFile is closed - * @param cacheDataCompressed whether to store blocks as compressed in the cache - * @param prefetchOnOpen whether to prefetch blocks upon open - * @param dropBehindCompaction indicate that we should set drop behind to true when open a store - * file reader for compaction - */ - @VisibleForTesting - CacheConfig(final BlockCache blockCache, - final boolean cacheDataOnRead, final boolean inMemory, - final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite, - final boolean cacheBloomsOnWrite, final boolean evictOnClose, - final boolean cacheDataCompressed, final boolean prefetchOnOpen, - final boolean dropBehindCompaction) { this.blockCache = blockCache; - this.cacheDataOnRead = cacheDataOnRead; - this.inMemory = inMemory; - this.cacheDataOnWrite = cacheDataOnWrite; - this.cacheIndexesOnWrite = cacheIndexesOnWrite; - this.cacheBloomsOnWrite = cacheBloomsOnWrite; - this.evictOnClose = evictOnClose; - this.cacheDataCompressed = cacheDataCompressed; - this.prefetchOnOpen = prefetchOnOpen; - this.dropBehindCompaction = dropBehindCompaction; + LOG.info("Created cacheConfig: " + this + " with blockCache=" + blockCache); } /** @@ -298,30 +181,29 @@ public class CacheConfig { * @param cacheConf */ public CacheConfig(CacheConfig cacheConf) { - this(cacheConf.blockCache, cacheConf.cacheDataOnRead, cacheConf.inMemory, - cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite, - cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose, - cacheConf.cacheDataCompressed, cacheConf.prefetchOnOpen, - cacheConf.dropBehindCompaction); + this.cacheDataOnRead = cacheConf.cacheDataOnRead; + this.inMemory = cacheConf.inMemory; + this.cacheDataOnWrite = cacheConf.cacheDataOnWrite; + this.cacheIndexesOnWrite = cacheConf.cacheIndexesOnWrite; + this.cacheBloomsOnWrite = cacheConf.cacheBloomsOnWrite; + this.evictOnClose = cacheConf.evictOnClose; + this.cacheDataCompressed = cacheConf.cacheDataCompressed; + this.prefetchOnOpen = cacheConf.prefetchOnOpen; + this.dropBehindCompaction = cacheConf.dropBehindCompaction; + this.blockCache = cacheConf.blockCache; } private CacheConfig() { - this(null, false, false, false, false, false, false, false, false, false); - } - - /** - * Checks whether the block cache is enabled. - */ - public boolean isBlockCacheEnabled() { - return this.blockCache != null; - } - - /** - * Returns the block cache. 
- * @return the block cache, or null if caching is completely disabled - */ - public BlockCache getBlockCache() { - return this.blockCache; + this.cacheDataOnRead = false; + this.inMemory = false; + this.cacheDataOnWrite = false; + this.cacheIndexesOnWrite = false; + this.cacheBloomsOnWrite = false; + this.evictOnClose = false; + this.cacheDataCompressed = false; + this.prefetchOnOpen = false; + this.dropBehindCompaction = false; + this.blockCache = null; } /** @@ -330,7 +212,7 @@ public class CacheConfig { * @return true if blocks should be cached on read, false if not */ public boolean shouldCacheDataOnRead() { - return isBlockCacheEnabled() && cacheDataOnRead; + return cacheDataOnRead; } public boolean shouldDropBehindCompaction() { @@ -343,20 +225,16 @@ public class CacheConfig { * available. */ public boolean shouldCacheBlockOnRead(BlockCategory category) { - return isBlockCacheEnabled() - && (cacheDataOnRead || - category == BlockCategory.INDEX || - category == BlockCategory.BLOOM || - (prefetchOnOpen && - (category != BlockCategory.META && - category != BlockCategory.UNKNOWN))); + return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM + || (prefetchOnOpen && (category != BlockCategory.META + && category != BlockCategory.UNKNOWN)); } /** * @return true if blocks in this file should be flagged as in-memory */ public boolean isInMemory() { - return isBlockCacheEnabled() && this.inMemory; + return this.inMemory; } /** @@ -364,7 +242,7 @@ public class CacheConfig { * written, false if not */ public boolean shouldCacheDataOnWrite() { - return isBlockCacheEnabled() && this.cacheDataOnWrite; + return this.cacheDataOnWrite; } /** @@ -382,7 +260,7 @@ public class CacheConfig { * is written, false if not */ public boolean shouldCacheIndexesOnWrite() { - return isBlockCacheEnabled() && this.cacheIndexesOnWrite; + return this.cacheIndexesOnWrite; } /** @@ -390,7 +268,7 @@ public class CacheConfig { * is written, false if not */ public boolean shouldCacheBloomsOnWrite() { - return isBlockCacheEnabled() && this.cacheBloomsOnWrite; + return this.cacheBloomsOnWrite; } /** @@ -398,7 +276,7 @@ public class CacheConfig { * reader is closed, false if not */ public boolean shouldEvictOnClose() { - return isBlockCacheEnabled() && this.evictOnClose; + return this.evictOnClose; } /** @@ -414,14 +292,13 @@ public class CacheConfig { * @return true if data blocks should be compressed in the cache, false if not */ public boolean shouldCacheDataCompressed() { - return isBlockCacheEnabled() && this.cacheDataOnRead && this.cacheDataCompressed; + return this.cacheDataOnRead && this.cacheDataCompressed; } /** * @return true if this {@link BlockCategory} should be compressed in blockcache, false otherwise */ public boolean shouldCacheCompressed(BlockCategory category) { - if (!isBlockCacheEnabled()) return false; switch (category) { case DATA: return this.cacheDataOnRead && this.cacheDataCompressed; @@ -434,7 +311,7 @@ public class CacheConfig { * @return true if blocks should be prefetched into the cache on open, false if not */ public boolean shouldPrefetchOnOpen() { - return isBlockCacheEnabled() && this.prefetchOnOpen; + return this.prefetchOnOpen; } /** @@ -446,9 +323,6 @@ public class CacheConfig { * configuration. 
*/ public boolean shouldReadBlockFromCache(BlockType blockType) { - if (!isBlockCacheEnabled()) { - return false; - } if (cacheDataOnRead) { return true; } @@ -461,8 +335,8 @@ public class CacheConfig { if (blockType == null) { return true; } - if (blockType.getCategory() == BlockCategory.BLOOM || - blockType.getCategory() == BlockCategory.INDEX) { + if (blockType.getCategory() == BlockCategory.BLOOM + || blockType.getCategory() == BlockCategory.INDEX) { return true; } return false; @@ -479,206 +353,21 @@ public class CacheConfig { return shouldCacheBlockOnRead(blockType.getCategory()); } - @Override - public String toString() { - if (!isBlockCacheEnabled()) { - return "CacheConfig:disabled"; - } - return "blockCache=" + getBlockCache() + - ", cacheDataOnRead=" + shouldCacheDataOnRead() + - ", cacheDataOnWrite=" + shouldCacheDataOnWrite() + - ", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + - ", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + - ", cacheEvictOnClose=" + shouldEvictOnClose() + - ", cacheDataCompressed=" + shouldCacheDataCompressed() + - ", prefetchOnOpen=" + shouldPrefetchOnOpen(); - } - - // Static block cache reference and methods - - /** - * Static reference to the block cache, or null if no caching should be used - * at all. - */ - // Clear this if in tests you'd make more than one block cache instance. - @VisibleForTesting - static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE; - private static LruBlockCache ONHEAP_CACHE_INSTANCE = null; - private static BlockCache L2_CACHE_INSTANCE = null;// Can be BucketCache or External cache. - - /** Boolean whether we have disabled the block cache entirely. */ - @VisibleForTesting - static boolean blockCacheDisabled = false; - - /** - * @param c Configuration to use. - * @return An L1 instance. Currently an instance of LruBlockCache. - */ - public static LruBlockCache getOnHeapCache(final Configuration c) { - return getOnHeapCacheInternal(c); - } - - public CacheStats getOnHeapCacheStats() { - if (ONHEAP_CACHE_INSTANCE != null) { - return ONHEAP_CACHE_INSTANCE.getStats(); - } - return null; - } - - public CacheStats getL2CacheStats() { - if (L2_CACHE_INSTANCE != null) { - return L2_CACHE_INSTANCE.getStats(); - } - return null; - } - /** - * @param c Configuration to use. - * @return An L1 instance. Currently an instance of LruBlockCache. - */ - private synchronized static LruBlockCache getOnHeapCacheInternal(final Configuration c) { - if (ONHEAP_CACHE_INSTANCE != null) { - return ONHEAP_CACHE_INSTANCE; - } - final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c); - if (cacheSize < 0) { - blockCacheDisabled = true; - } - if (blockCacheDisabled) return null; - int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); - LOG.info("Allocating onheap LruBlockCache size=" + - StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); - ONHEAP_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c); - return ONHEAP_CACHE_INSTANCE; - } - - private static BlockCache getExternalBlockcache(Configuration c) { - if (LOG.isDebugEnabled()) { - LOG.debug("Trying to use External l2 cache"); - } - Class klass = null; - - // Get the class, from the config. 
s - try { - klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; - } catch (IllegalArgumentException exception) { - try { - klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName( - "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); - } catch (ClassNotFoundException e) { - return null; - } - } - - // Now try and create an instance of the block cache. - try { - LOG.info("Creating external block cache of type: " + klass); - return (BlockCache) ReflectionUtils.newInstance(klass, c); - } catch (Exception e) { - LOG.warn("Error creating external block cache", e); - } - return null; - - } - - @VisibleForTesting - static BucketCache getBucketCache(Configuration c) { - // Check for L2. ioengine name must be non-null. - String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); - if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null; - - int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); - final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c); - if (bucketCacheSize <= 0) { - throw new IllegalStateException("bucketCacheSize <= 0; Check " + - BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size"); - } - if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) { - LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer " - + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note"); - } - int writerThreads = c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, - DEFAULT_BUCKET_CACHE_WRITER_THREADS); - int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, - DEFAULT_BUCKET_CACHE_WRITER_QUEUE); - String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY); - String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY); - int [] bucketSizes = null; - if (configuredBucketSizes != null) { - bucketSizes = new int[configuredBucketSizes.length]; - for (int i = 0; i < configuredBucketSizes.length; i++) { - int bucketSize = Integer.parseInt(configuredBucketSizes[i].trim()); - if (bucketSize % 256 != 0) { - // We need all the bucket sizes to be multiples of 256. Having all the configured bucket - // sizes to be multiples of 256 will ensure that the block offsets within buckets, - // that are calculated, will also be multiples of 256. - // See BucketEntry where offset to each block is represented using 5 bytes (instead of 8 - // bytes long). We would like to save heap overhead as less as possible. - throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '" - + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes to be multiples of 256"); - } - bucketSizes[i] = bucketSize; - } - } - BucketCache bucketCache = null; - try { - int ioErrorsTolerationDuration = c.getInt( - "hbase.bucketcache.ioengine.errors.tolerated.duration", - BucketCache.DEFAULT_ERROR_TOLERATION_DURATION); - // Bucket cache logs its stats on creation internal to the constructor. - bucketCache = new BucketCache(bucketCacheIOEngineName, - bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath, - ioErrorsTolerationDuration, c); - } catch (IOException ioex) { - LOG.error("Can't instantiate bucket cache", ioex); throw new RuntimeException(ioex); - } - return bucketCache; - } - - /** - * Returns the block cache or null in case none should be used. - * Sets GLOBAL_BLOCK_CACHE_INSTANCE + * Returns the block cache. * - * @param conf The current configuration. 
- * @return The block cache or null. + * @return the block cache, or null if caching is completely disabled */ - public static synchronized BlockCache instantiateBlockCache(Configuration conf) { - if (GLOBAL_BLOCK_CACHE_INSTANCE != null) { - return GLOBAL_BLOCK_CACHE_INSTANCE; - } - if (blockCacheDisabled) { - return null; - } - LruBlockCache onHeapCache = getOnHeapCacheInternal(conf); - // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the - // call. - if (blockCacheDisabled) { - return null; - } - boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); - if (useExternal) { - L2_CACHE_INSTANCE = getExternalBlockcache(conf); - GLOBAL_BLOCK_CACHE_INSTANCE = L2_CACHE_INSTANCE == null ? onHeapCache - : new InclusiveCombinedBlockCache(onHeapCache, L2_CACHE_INSTANCE); - } else { - // otherwise use the bucket cache. - L2_CACHE_INSTANCE = getBucketCache(conf); - if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) { - // Non combined mode is off from 2.0 - LOG.warn( - "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); - } - GLOBAL_BLOCK_CACHE_INSTANCE = L2_CACHE_INSTANCE == null ? onHeapCache - : new CombinedBlockCache(onHeapCache, L2_CACHE_INSTANCE); - } - return GLOBAL_BLOCK_CACHE_INSTANCE; + public BlockCache getBlockCache() { + return this.blockCache; } - // Supposed to use only from tests. Some tests want to reinit the Global block cache instance - @VisibleForTesting - static synchronized void clearGlobalInstances() { - ONHEAP_CACHE_INSTANCE = null; - L2_CACHE_INSTANCE = null; - GLOBAL_BLOCK_CACHE_INSTANCE = null; + @Override + public String toString() { + return "cacheDataOnRead=" + shouldCacheDataOnRead() + ", cacheDataOnWrite=" + + shouldCacheDataOnWrite() + ", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + + ", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + ", cacheEvictOnClose=" + + shouldEvictOnClose() + ", cacheDataCompressed=" + shouldCacheDataCompressed() + + ", prefetchOnOpen=" + shouldPrefetchOnOpen(); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 5b17b38..047feef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -381,4 +381,12 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { return (this.l2Cache instanceof BucketCache) ? ((BucketCache) this.l2Cache).getRefCount(cacheKey) : 0; } + + public BlockCache getOnHeapCache() { + return this.onHeapCache; + } + + public BlockCache getL2Cache() { + return this.l2Cache; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index a4a40ba..e6a6493 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1293,69 +1293,67 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { * Retrieve block from cache. Validates the retrieved block's type vs {@code expectedBlockType} * and its encoding vs. {@code expectedDataBlockEncoding}. Unpacks the block as necessary. 
*/ - private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock, - boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) throws IOException { - // Check cache for block. If found return. - if (cacheConf.isBlockCacheEnabled()) { - BlockCache cache = cacheConf.getBlockCache(); - HFileBlock cachedBlock = (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, - updateCacheMetrics); - if (cachedBlock != null) { - if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) { - HFileBlock compressedBlock = cachedBlock; - cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader); - // In case of compressed block after unpacking we can return the compressed block + private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock, + boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, + DataBlockEncoding expectedDataBlockEncoding) throws IOException { + // Check cache for block. If found return. + BlockCache cache = cacheConf.getBlockCache(); + if (cache != null) { + HFileBlock cachedBlock = + (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, updateCacheMetrics); + if (cachedBlock != null) { + if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) { + HFileBlock compressedBlock = cachedBlock; + cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader); + // In case of compressed block after unpacking we can return the compressed block if (compressedBlock != cachedBlock) { cache.returnBlock(cacheKey, compressedBlock); } } - validateBlockType(cachedBlock, expectedBlockType); - - if (expectedDataBlockEncoding == null) { - return cachedBlock; - } - DataBlockEncoding actualDataBlockEncoding = - cachedBlock.getDataBlockEncoding(); - // Block types other than data blocks always have - // DataBlockEncoding.NONE. To avoid false negative cache misses, only - // perform this check if cached block is a data block. - if (cachedBlock.getBlockType().isData() && - !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) { - // This mismatch may happen if a Scanner, which is used for say a - // compaction, tries to read an encoded block from the block cache. - // The reverse might happen when an EncodedScanner tries to read - // un-encoded blocks which were cached earlier. - // - // Because returning a data block with an implicit BlockType mismatch - // will cause the requesting scanner to throw a disk read should be - // forced here. This will potentially cause a significant number of - // cache misses, so update so we should keep track of this as it might - // justify the work on a CompoundScanner. - if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) && - !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)) { - // If the block is encoded but the encoding does not match the - // expected encoding it is likely the encoding was changed but the - // block was not yet evicted. Evictions on file close happen async - // so blocks with the old encoding still linger in cache for some - // period of time. This event should be rare as it only happens on - // schema definition change. - LOG.info("Evicting cached block with key " + cacheKey + - " because of a data block encoding mismatch" + - "; expected: " + expectedDataBlockEncoding + - ", actual: " + actualDataBlockEncoding); - // This is an error scenario. so here we need to decrement the - // count. 
- cache.returnBlock(cacheKey, cachedBlock); - cache.evictBlock(cacheKey); - } - return null; - } - return cachedBlock; - } - } - return null; - } + validateBlockType(cachedBlock, expectedBlockType); + + if (expectedDataBlockEncoding == null) { + return cachedBlock; + } + DataBlockEncoding actualDataBlockEncoding = cachedBlock.getDataBlockEncoding(); + // Block types other than data blocks always have + // DataBlockEncoding.NONE. To avoid false negative cache misses, only + // perform this check if cached block is a data block. + if (cachedBlock.getBlockType().isData() && !actualDataBlockEncoding + .equals(expectedDataBlockEncoding)) { + // This mismatch may happen if a Scanner, which is used for say a + // compaction, tries to read an encoded block from the block cache. + // The reverse might happen when an EncodedScanner tries to read + // un-encoded blocks which were cached earlier. + // + // Because returning a data block with an implicit BlockType mismatch + // will cause the requesting scanner to throw a disk read should be + // forced here. This will potentially cause a significant number of + // cache misses, so update so we should keep track of this as it might + // justify the work on a CompoundScanner. + if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) && !actualDataBlockEncoding + .equals(DataBlockEncoding.NONE)) { + // If the block is encoded but the encoding does not match the + // expected encoding it is likely the encoding was changed but the + // block was not yet evicted. Evictions on file close happen async + // so blocks with the old encoding still linger in cache for some + // period of time. This event should be rare as it only happens on + // schema definition change. + LOG.info("Evicting cached block with key " + cacheKey + + " because of a data block encoding mismatch" + "; expected: " + + expectedDataBlockEncoding + ", actual: " + actualDataBlockEncoding); + // This is an error scenario. so here we need to decrement the + // count. + cache.returnBlock(cacheKey, cachedBlock); + cache.evictBlock(cacheKey); + } + return null; + } + return cachedBlock; + } + } + return null; + } /** * @param metaBlockName @@ -1391,17 +1389,15 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { this.isPrimaryReplicaReader(), BlockType.META); cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory()); - if (cacheConf.isBlockCacheEnabled()) { - HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, false, true, true, - BlockType.META, null); - if (cachedBlock != null) { - assert cachedBlock.isUnpacked() : "Packed block leak."; - // Return a distinct 'shallow copy' of the block, - // so pos does not get messed by the scanner - return cachedBlock; - } - // Cache Miss, please load. + HFileBlock cachedBlock = + getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null); + if (cachedBlock != null) { + assert cachedBlock.isUnpacked() : "Packed block leak."; + // Return a distinct 'shallow copy' of the block, + // so pos does not get messed by the scanner + return cachedBlock; } + // Cache Miss, please load. HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false). 
unpack(hfileContext, fsBlockReader); @@ -1491,7 +1487,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory(); // Cache the block if necessary - if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) { + if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category) && + cacheConf.getBlockCache() != null) { cacheConf.getBlockCache().cacheBlock(cacheKey, cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked, cacheConf.isInMemory()); @@ -1569,8 +1566,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { @Override public void close(boolean evictOnClose) throws IOException { PrefetchExecutor.cancel(path); - if (evictOnClose && cacheConf.isBlockCacheEnabled()) { - int numEvicted = cacheConf.getBlockCache().evictBlocksByHfileName(name); + BlockCache cache = cacheConf.getBlockCache(); + if (evictOnClose && cache != null) { + int numEvicted = cache.evictBlocksByHfileName(name); if (LOG.isTraceEnabled()) { LOG.trace("On close, file=" + name + " evicted=" + numEvicted + " block(s)"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index cfc3dd9..ef506b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -344,7 +344,7 @@ public class HFileWriterImpl implements HFile.Writer { dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), lastDataBlockOffset, onDiskSize); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); - if (cacheConf.shouldCacheDataOnWrite()) { + if (cacheConf.shouldCacheDataOnWrite() && cacheConf.getBlockCache() != null) { doCacheOnWrite(lastDataBlockOffset); } } @@ -529,7 +529,7 @@ public class HFileWriterImpl implements HFile.Writer { blockWriter.getUncompressedSizeWithoutHeader()); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); - if (cacheThisBlock) { + if (cacheThisBlock && cacheConf.getBlockCache() != null) { doCacheOnWrite(offset); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index b8e4a0d..6caa975 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -29,11 +29,11 @@ import java.util.Queue; import java.util.Set; import java.util.concurrent.atomic.LongAdder; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BucketEntry; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; @@ -451,7 +451,7 @@ public final class BucketAllocator { BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize); if (bsi == null) { throw new BucketAllocatorException("Allocation too big size=" + blockSize + - "; adjust BucketCache sizes " + CacheConfig.BUCKET_CACHE_BUCKETS_KEY + + "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY + " to 
accomodate if size seems reasonable and you want it cached."); } long offset = bsi.allocateBlock(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index 7811d9b..8dfed15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -638,17 +638,16 @@ public class MergeTableRegionsProcedure final Configuration conf = env.getMasterConfiguration(); final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); - for (String family: regionFs.getFamilies()) { + for (String family : regionFs.getFamilies()) { final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family)); final Collection storeFiles = regionFs.getStoreFiles(family); if (storeFiles != null && storeFiles.size() > 0) { - final CacheConfig cacheConf = new CacheConfig(conf, hcd); - for (StoreFileInfo storeFileInfo: storeFiles) { + for (StoreFileInfo storeFileInfo : storeFiles) { // Create reference file(s) of the region in mergedDir - regionFs.mergeStoreFile(mergedRegion, family, new HStoreFile(mfs.getFileSystem(), - storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true), - mergedDir); + regionFs.mergeStoreFile(mergedRegion, family, + new HStoreFile(mfs.getFileSystem(), storeFileInfo, conf, CacheConfig.DISABLED, + hcd.getBloomFilterType(), true), mergedDir); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index b66d91f..dcfd500 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -638,16 +638,15 @@ public class SplitTableRegionProcedure TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); // Split each store file. 
- for (Map.Entry>e: files.entrySet()) { - byte [] familyName = Bytes.toBytes(e.getKey()); + for (Map.Entry> e : files.entrySet()) { + byte[] familyName = Bytes.toBytes(e.getKey()); final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName); final Collection storeFiles = e.getValue(); if (storeFiles != null && storeFiles.size() > 0) { - final CacheConfig cacheConf = new CacheConfig(conf, hcd); - for (StoreFileInfo storeFileInfo: storeFiles) { - StoreFileSplitter sfs = - new StoreFileSplitter(regionFs, familyName, new HStoreFile(mfs.getFileSystem(), - storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true)); + for (StoreFileInfo storeFileInfo : storeFiles) { + StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, + new HStoreFile(mfs.getFileSystem(), storeFileInfo, conf, CacheConfig.DISABLED, + hcd.getBloomFilterType(), true)); futures.add(threadPool.submit(sfs)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java index 2305eba..f8d7107 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.mob; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -30,35 +30,20 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; @InterfaceAudience.Private public class MobCacheConfig extends CacheConfig { - private static MobFileCache mobFileCache; + private MobFileCache mobFileCache; - public MobCacheConfig(Configuration conf, ColumnFamilyDescriptor family) { - super(conf, family); - instantiateMobFileCache(conf); - } - - public MobCacheConfig(Configuration conf) { - super(conf); - instantiateMobFileCache(conf); - } - - /** - * Instantiates the MobFileCache. - * @param conf The current configuration. - * @return The current instance of MobFileCache. - */ - public static synchronized MobFileCache instantiateMobFileCache(Configuration conf) { - if (mobFileCache == null) { - mobFileCache = new MobFileCache(conf); - } - return mobFileCache; + public MobCacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache blockCache, + MobFileCache mobFileCache) { + super(conf, family, blockCache); + this.mobFileCache = mobFileCache; } /** * Gets the MobFileCache. + * * @return The MobFileCache. 
*/ public MobFileCache getMobFileCache() { return mobFileCache; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java index b709d06..ef3d3b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java @@ -34,11 +34,12 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.util.IdLock; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.IdLock; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -52,6 +53,14 @@ public class MobFileCache { private static final Logger LOG = LoggerFactory.getLogger(MobFileCache.class); + /** + * Only used for unit test which doesn't start region server. + * Clear this if in tests you'd make more than one mob file cache instance. + */ + @VisibleForTesting + @edu.umd.cs.findbugs.annotations.SuppressWarnings("MS_SHOULD_BE_FINAL") + public static MobFileCache GLOBAL_MOB_FILE_CACHE_INSTANCE; + /* * Eviction and statistics thread. Periodically run to print the statistics and * evict the lru cached mob files when the count of the cached files is larger diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 740eb08..7d8e18d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -47,10 +47,12 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.CorruptHFileException; import org.apache.hadoop.hbase.mob.MobCacheConfig; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobFile; +import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.mob.MobFileName; import org.apache.hadoop.hbase.mob.MobStoreEngine; import org.apache.hadoop.hbase.mob.MobUtils; @@ -130,7 +132,16 @@ public class HMobStore extends HStore { */ @Override protected void createCacheConf(ColumnFamilyDescriptor family) { - cacheConf = new MobCacheConfig(conf, family); + if (region.getRegionServerServices() != null) { + cacheConf = new MobCacheConfig(conf, family, region.getRegionServerServices().getBlockCache(), + region.getRegionServerServices().getMobFileCache()); + } else { + // Used for unit test which doesn't start region server. + // The global block cache instance need to be instantiated by the unit test. + // The global mob file cache instance need to be instantiated by the unit test. 
+ cacheConf = new MobCacheConfig(conf, family, BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE, + MobFileCache.GLOBAL_MOB_FILE_CACHE_INSTANCE); + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index b9d606d..f2879d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -98,8 +98,10 @@ import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; +import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper; @@ -114,7 +116,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.mob.MobCacheConfig; +import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore; @@ -418,10 +420,10 @@ public class HRegionServer extends HasThread implements private final RegionServerAccounting regionServerAccounting; - // Cache configuration and block cache reference - protected CacheConfig cacheConfig; - // Cache configuration for mob - final MobCacheConfig mobCacheConfig; + // Block cache + private BlockCache blockCache; + // The cache for mob files + private MobFileCache mobFileCache; /** The health check chore. */ private HealthCheckChore healthCheckChore; @@ -599,12 +601,12 @@ public class HRegionServer extends HasThread implements boolean isMasterNotCarryTable = this instanceof HMaster && !LoadBalancer.isTablesOnMaster(conf); - // no need to instantiate global block cache when master not carry table + + // no need to instantiate block cache and mob file cache when master not carry table if (!isMasterNotCarryTable) { - CacheConfig.instantiateBlockCache(conf); + blockCache = BlockCacheFactory.createBlockCache(conf); + mobFileCache = new MobFileCache(conf); } - cacheConfig = new CacheConfig(conf); - mobCacheConfig = new MobCacheConfig(conf); uncaughtExceptionHandler = new UncaughtExceptionHandler() { @Override @@ -1071,10 +1073,12 @@ public class HRegionServer extends HasThread implements } } // Send cache a shutdown. 
- if (cacheConfig != null && cacheConfig.isBlockCacheEnabled()) { - cacheConfig.getBlockCache().shutdown(); + if (blockCache != null) { + blockCache.shutdown(); + } + if (mobFileCache != null) { + mobFileCache.shutdown(); } - mobCacheConfig.getMobFileCache().shutdown(); if (movedRegionsCleaner != null) { movedRegionsCleaner.stop("Region Server stopping"); @@ -1618,9 +1622,16 @@ public class HRegionServer extends HasThread implements } private void startHeapMemoryManager() { - this.hMemManager = HeapMemoryManager.create(this.conf, this.cacheFlusher, this, - this.regionServerAccounting); - if (this.hMemManager != null) { + if (this.blockCache != null) { + if (this.blockCache instanceof CombinedBlockCache) { + this.hMemManager = new HeapMemoryManager( + (ResizableBlockCache) ((CombinedBlockCache) this.blockCache).getOnHeapCache(), + this.cacheFlusher, this, regionServerAccounting); + } else { + this.hMemManager = + new HeapMemoryManager((ResizableBlockCache) this.blockCache, this.cacheFlusher, this, + regionServerAccounting); + } this.hMemManager.start(getChoreService()); } } @@ -3630,10 +3641,23 @@ public class HRegionServer extends HasThread implements } /** - * @return The cache config instance used by the regionserver. + * May be null if this is a master which not carry table. + * + * @return The block cache instance used by the regionserver. + */ + @Override + public BlockCache getBlockCache() { + return this.blockCache; + } + + /** + * May be null if this is a master which not carry table. + * + * @return The cache for mob files used by the regionserver. */ - public CacheConfig getCacheConfig() { - return this.cacheConfig; + @Override + public MobFileCache getMobFileCache() { + return this.mobFileCache; } /** @@ -3662,7 +3686,6 @@ public class HRegionServer extends HasThread implements } public CacheEvictionStats clearRegionBlockCache(Region region) { - BlockCache blockCache = this.getCacheConfig().getBlockCache(); long evictedBlocks = 0; for(Store store : region.getStores()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 032dc5f..359dac0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -377,7 +378,14 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat * @param family The current column family. */ protected void createCacheConf(final ColumnFamilyDescriptor family) { - this.cacheConf = new CacheConfig(conf, family); + if (region.getRegionServerServices() != null) { + this.cacheConf = + new CacheConfig(conf, family, region.getRegionServerServices().getBlockCache()); + } else { + // Used for unit test which doesn't start region server. + // The global block cache instance need to be instantiated by the unit test. 
+ this.cacheConf = new CacheConfig(conf, family, BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE); + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index c32fce2..199c9b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -28,15 +28,14 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; +import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; -import org.apache.hadoop.hbase.io.util.MemorySizeUtil; -import org.apache.hadoop.util.ReflectionUtils; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; @@ -105,15 +104,6 @@ public class HeapMemoryManager { private List tuneObservers = new ArrayList<>(); - public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher, - Server server, RegionServerAccounting regionServerAccounting) { - ResizableBlockCache lruCache = CacheConfig.getOnHeapCache(conf); - if (lruCache != null) { - return new HeapMemoryManager(lruCache, memStoreFlusher, server, regionServerAccounting); - } - return null; - } - @VisibleForTesting HeapMemoryManager(ResizableBlockCache blockCache, FlushRequester memStoreFlusher, Server server, RegionServerAccounting regionServerAccounting) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 04a32c8..3854325 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -32,9 +32,8 @@ import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheStats; -import org.apache.hadoop.hbase.mob.MobCacheConfig; +import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -148,34 +147,24 @@ class MetricsRegionServerWrapperImpl } } - /** - * It's possible that due to threading the block cache could not be initialized - * yet (testing multiple region servers in one jvm). So we need to try and initialize - * the blockCache and cacheStats reference multiple times until we succeed. 
- */ - private synchronized void initBlockCache() { - CacheConfig cacheConfig = this.regionServer.cacheConfig; - if (cacheConfig != null) { - l1Stats = cacheConfig.getOnHeapCacheStats(); - l2Stats = cacheConfig.getL2CacheStats(); - if (this.blockCache == null) { - this.blockCache = cacheConfig.getBlockCache(); - } - } - - if (this.blockCache != null && this.cacheStats == null) { + private void initBlockCache() { + this.blockCache = this.regionServer.getBlockCache(); + if (this.blockCache != null) { this.cacheStats = blockCache.getStats(); + if (this.blockCache instanceof CombinedBlockCache) { + l1Stats = ((CombinedBlockCache) this.blockCache).getOnHeapCache().getStats(); + l2Stats = ((CombinedBlockCache) this.blockCache).getL2Cache().getStats(); + } else { + l1Stats = this.blockCache.getStats(); + } } } /** * Initializes the mob file cache. */ - private synchronized void initMobFileCache() { - MobCacheConfig mobCacheConfig = this.regionServer.mobCacheConfig; - if (mobCacheConfig != null && this.mobFileCache == null) { - this.mobFileCache = mobCacheConfig.getMobFileCache(); - } + private void initMobFileCache() { + this.mobFileCache = this.regionServer.getMobFileCache(); } @Override @@ -734,9 +723,6 @@ class MetricsRegionServerWrapperImpl @Override synchronized public void run() { try { - initBlockCache(); - initMobFileCache(); - HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); HDFSBlocksDistribution hdfsBlocksDistributionSecondaryRegions = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index df84dcf..4b4e36d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -3683,7 +3683,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, stats.addException(region.getRegionInfo().getRegionName(), e); } } - stats.withMaxCacheSize(regionServer.getCacheConfig().getBlockCache().getMaxSize()); + stats.withMaxCacheSize(regionServer.getBlockCache().getMaxSize()); return builder.setStats(ProtobufUtil.toCacheEvictionStats(stats.build())).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 37a3606..00be650 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -31,7 +31,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.locking.EntityLock; import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.quotas.RegionSizeStore; @@ -266,4 +268,14 @@ public interface RegionServerServices extends Server, MutableOnlineRegions, Favo * @return Return table descriptors implementation. */ TableDescriptors getTableDescriptors(); -} + + /** + * @return The block cache instance. 
+ */ + BlockCache getBlockCache(); + + /** + * @return The cache for mob files. + */ + MobFileCache getMobFileCache(); +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index c0a2a8c..41f0610 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -36,7 +36,9 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.locking.EntityLock; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.quotas.RegionSizeStore; @@ -361,4 +363,14 @@ public class MockRegionServerServices implements RegionServerServices { public TableDescriptors getTableDescriptors() { return null; } + + @Override + public BlockCache getBlockCache() { + return null; + } + + @Override + public MobFileCache getMobFileCache() { + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java index 11d7bb4..ca9f51c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.regionserver.BloomType; @@ -112,9 +112,10 @@ public class TestEncodedSeekers { if(includeTags) { testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3); } - CacheConfig.instantiateBlockCache(testUtil.getConfiguration()); - LruBlockCache cache = - (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache(); + + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = + BlockCacheFactory.createBlockCache(testUtil.getConfiguration()); + LruBlockCache cache = (LruBlockCache) BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE; cache.clearCache(); // Need to disable default row bloom filter for this test to pass. HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS). 
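The setup above is the pattern this patch applies across the tests: a test that does not start a region server creates the block cache itself with BlockCacheFactory and hands it to CacheConfig explicitly, instead of relying on CacheConfig to lazily instantiate a global cache. A minimal self-contained sketch of that wiring follows (the class name and main method are illustrative assumptions; the HBase types and calls are the ones used in the hunks above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class CacheConfigWiringSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Create the block cache once, up front, from the configuration.
    BlockCache blockCache = BlockCacheFactory.createBlockCache(conf);
    // Tests that never start a region server publish the cache through the
    // test-only global field so that HStore.createCacheConf can still find it.
    BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
    // CacheConfig no longer creates or owns a cache; the instance is passed in.
    CacheConfig cacheConf = new CacheConfig(conf, blockCache);
    System.out.println(cacheConf);
  }
}

This mirrors how HRegionServer now holds the BlockCache and MobFileCache directly and shuts them down itself, rather than reaching them through a CacheConfig.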
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java index 19919e0..1313f31 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.hbase.io.hfile; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Map; import java.util.NavigableSet; import java.util.Objects; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -31,7 +33,6 @@ import org.apache.hadoop.hbase.io.hfile.TestCacheConfig.DataCacheEntry; import org.apache.hadoop.hbase.io.hfile.TestCacheConfig.IndexCacheEntry; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.After; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -51,16 +52,9 @@ public class TestBlockCacheReporting { @Before public void setUp() throws Exception { - CacheConfig.clearGlobalInstances(); this.conf = HBaseConfiguration.create(); } - @After - public void tearDown() throws Exception { - // Let go of current block cache. - CacheConfig.clearGlobalInstances(); - } - private void addDataAndHits(final BlockCache bc, final int count) { Cacheable dce = new DataCacheEntry(); Cacheable ice = new IndexCacheEntry(); @@ -85,39 +79,36 @@ public class TestBlockCacheReporting { public void testBucketCache() throws IOException { this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100); - CacheConfig.instantiateBlockCache(this.conf); - CacheConfig cc = new CacheConfig(this.conf); - assertTrue(cc.getBlockCache() instanceof CombinedBlockCache); - logPerBlock(cc.getBlockCache()); + BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf); + assertTrue(blockCache instanceof CombinedBlockCache); + logPerBlock(blockCache); final int count = 3; - addDataAndHits(cc.getBlockCache(), count); + addDataAndHits(blockCache, count); // The below has no asserts. It is just exercising toString and toJSON code. - LOG.info(Objects.toString(cc.getBlockCache().getStats())); - BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(cc.getBlockCache()); + LOG.info(Objects.toString(blockCache.getStats())); + BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(blockCache); LOG.info(Objects.toString(cbsbf)); logPerFile(cbsbf); - bucketCacheReport(cc.getBlockCache()); + bucketCacheReport(blockCache); LOG.info(BlockCacheUtil.toJSON(cbsbf)); } @Test public void testLruBlockCache() throws IOException { - CacheConfig.instantiateBlockCache(this.conf); CacheConfig cc = new CacheConfig(this.conf); - assertTrue(cc.isBlockCacheEnabled()); assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory()); - assertTrue(cc.getBlockCache() instanceof LruBlockCache); - logPerBlock(cc.getBlockCache()); - addDataAndHits(cc.getBlockCache(), 3); + BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf); + assertTrue(blockCache instanceof LruBlockCache); + logPerBlock(blockCache); + addDataAndHits(blockCache, 3); // The below has no asserts. It is just exercising toString and toJSON code. 
- BlockCache bc = cc.getBlockCache(); - LOG.info("count=" + bc.getBlockCount() + ", currentSize=" + bc.getCurrentSize() + - ", freeSize=" + bc.getFreeSize() ); - LOG.info(Objects.toString(cc.getBlockCache().getStats())); - BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(cc.getBlockCache()); + LOG.info("count=" + blockCache.getBlockCount() + ", currentSize=" + blockCache.getCurrentSize() + + ", freeSize=" + blockCache.getFreeSize()); + LOG.info(Objects.toString(blockCache.getStats())); + BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(blockCache); LOG.info(Objects.toString(cbsbf)); logPerFile(cbsbf); - bucketCacheReport(cc.getBlockCache()); + bucketCacheReport(blockCache); LOG.info(BlockCacheUtil.toJSON(cbsbf)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index 7b6bbb3..4c56fff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Threads; -import org.junit.After; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -165,33 +164,25 @@ public class TestCacheConfig { @Before public void setUp() throws Exception { - CacheConfig.clearGlobalInstances(); this.conf = HBaseConfiguration.create(); } - @After - public void tearDown() throws Exception { - // Let go of current block cache. - CacheConfig.clearGlobalInstances(); - } - /** - * @param cc + * @param bc The block cache instance. + * @param cc Cache config. * @param doubling If true, addition of element ups counter by 2, not 1, because element added * to onheap and offheap caches. * @param sizing True if we should run sizing test (doesn't always apply). */ - void basicBlockCacheOps(final CacheConfig cc, final boolean doubling, + void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling, final boolean sizing) { - assertTrue(cc.isBlockCacheEnabled()); assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory()); - BlockCache bc = cc.getBlockCache(); BlockCacheKey bck = new BlockCacheKey("f", 0); Cacheable c = new DataCacheEntry(); // Do asserts on block counting. long initialBlockCount = bc.getBlockCount(); bc.cacheBlock(bck, c, cc.isInMemory()); - assertEquals(doubling? 2: 1, bc.getBlockCount() - initialBlockCount); + assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount); bc.evictBlock(bck); assertEquals(initialBlockCount, bc.getBlockCount()); // Do size accounting. 
Do it after the above 'warm-up' because it looks like some @@ -209,7 +200,6 @@ public class TestCacheConfig { @Test public void testDisableCacheDataBlock() throws IOException { Configuration conf = HBaseConfiguration.create(); - CacheConfig.instantiateBlockCache(conf); CacheConfig cacheConfig = new CacheConfig(conf); assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA)); assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA)); @@ -260,7 +250,7 @@ public class TestCacheConfig { HColumnDescriptor family = new HColumnDescriptor("testDisableCacheDataBlock"); family.setBlockCacheEnabled(false); - cacheConfig = new CacheConfig(conf, family); + cacheConfig = new CacheConfig(conf, family, null); assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA)); assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA)); assertFalse(cacheConfig.shouldCacheDataCompressed()); @@ -275,12 +265,11 @@ public class TestCacheConfig { @Test public void testCacheConfigDefaultLRUBlockCache() { - CacheConfig.instantiateBlockCache(this.conf); CacheConfig cc = new CacheConfig(this.conf); - assertTrue(cc.isBlockCacheEnabled()); assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory()); - basicBlockCacheOps(cc, false, true); - assertTrue(cc.getBlockCache() instanceof LruBlockCache); + BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf); + basicBlockCacheOps(blockCache, cc, false, true); + assertTrue(blockCache instanceof LruBlockCache); } /** @@ -309,18 +298,18 @@ public class TestCacheConfig { private void doBucketCacheConfigTest() { final int bcSize = 100; this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize); - CacheConfig.instantiateBlockCache(this.conf); CacheConfig cc = new CacheConfig(this.conf); - basicBlockCacheOps(cc, false, false); - assertTrue(cc.getBlockCache() instanceof CombinedBlockCache); + BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf); + basicBlockCacheOps(blockCache, cc, false, false); + assertTrue(blockCache instanceof CombinedBlockCache); // TODO: Assert sizes allocated are right and proportions. - CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache(); - BlockCache [] bcs = cbc.getBlockCaches(); + CombinedBlockCache cbc = (CombinedBlockCache) blockCache; + BlockCache[] bcs = cbc.getBlockCaches(); assertTrue(bcs[0] instanceof LruBlockCache); - LruBlockCache lbc = (LruBlockCache)bcs[0]; + LruBlockCache lbc = (LruBlockCache) bcs[0]; assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize()); assertTrue(bcs[1] instanceof BucketCache); - BucketCache bc = (BucketCache)bcs[1]; + BucketCache bc = (BucketCache) bcs[1]; // getMaxSize comes back in bytes but we specified size in MB assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024)); } @@ -341,12 +330,12 @@ public class TestCacheConfig { long bcExpectedSize = 100 * 1024 * 1024; // MB. assertTrue(lruExpectedSize < bcExpectedSize); this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize); - CacheConfig.instantiateBlockCache(this.conf); CacheConfig cc = new CacheConfig(this.conf); - basicBlockCacheOps(cc, false, false); - assertTrue(cc.getBlockCache() instanceof CombinedBlockCache); + BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf); + basicBlockCacheOps(blockCache, cc, false, false); + assertTrue(blockCache instanceof CombinedBlockCache); // TODO: Assert sizes allocated are right and proportions. 
- CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache(); + CombinedBlockCache cbc = (CombinedBlockCache) blockCache; LruBlockCache lbc = cbc.onHeapCache; assertEquals(lruExpectedSize, lbc.getMaxSize()); BlockCache bc = cbc.l2Cache; @@ -382,10 +371,10 @@ public class TestCacheConfig { public void testL2CacheWithInvalidBucketSize() { Configuration c = new Configuration(this.conf); c.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); - c.set(CacheConfig.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096"); + c.set(BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096"); c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024); try { - CacheConfig.getBucketCache(c); + BlockCacheFactory.createBlockCache(c); fail("Should throw IllegalArgumentException when passing illegal value for bucket size"); } catch (IllegalArgumentException e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 9c2f6df..18c7ca0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -31,6 +31,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -160,8 +160,7 @@ public class TestCacheOnWrite { Configuration conf = TEST_UTIL.getConfiguration(); List blockcaches = new ArrayList<>(); // default - CacheConfig.instantiateBlockCache(conf); - blockcaches.add(new CacheConfig(conf).getBlockCache()); + blockcaches.add(BlockCacheFactory.createBlockCache(conf)); //set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287 TEST_UTIL.getConfiguration().setFloat(LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 2.0f); @@ -224,16 +223,17 @@ public class TestCacheOnWrite { conf = TEST_UTIL.getConfiguration(); this.conf.set("dfs.datanode.data.dir.perm", "700"); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE); - conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, - BLOOM_BLOCK_SIZE); + conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE); conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData); cowType.modifyConf(conf); + conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, cowType.shouldBeCached(BlockType.DATA)); + conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, + cowType.shouldBeCached(BlockType.LEAF_INDEX)); + conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, + cowType.shouldBeCached(BlockType.BLOOM_CHUNK)); + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache; + cacheConf = new CacheConfig(conf, blockCache); fs = HFileSystem.get(conf); - cacheConf = - new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA), - 
cowType.shouldBeCached(BlockType.LEAF_INDEX), - cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, - false, false); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 5612c1b..545ef13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -25,14 +25,14 @@ import java.util.Arrays; import java.util.Collection; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -103,24 +103,22 @@ public class TestForceCacheImportantBlocks { @Before public void setup() { - // Make sure we make a new one each time. - CacheConfig.clearGlobalInstances(); HFile.DATABLOCK_READ_COUNT.reset(); - CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration()); + // Make sure we make a new one each time. + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = + BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration()); } @Test public void testCacheBlocks() throws IOException { // Set index block size to be the same as normal block size. TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, BLOCK_SIZE); - HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(CF)).setMaxVersions(MAX_VERSIONS). - setCompressionType(COMPRESSION_ALGORITHM). 
- setBloomFilterType(BLOOM_TYPE); - hcd.setBlocksize(BLOCK_SIZE); - hcd.setBlockCacheEnabled(cfCacheEnabled); - HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd); - BlockCache cache = region.getStore(hcd.getName()).getCacheConfig().getBlockCache(); - CacheStats stats = cache.getStats(); + ColumnFamilyDescriptor cfd = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CF)).setMaxVersions(MAX_VERSIONS) + .setCompressionType(COMPRESSION_ALGORITHM).setBloomFilterType(BLOOM_TYPE) + .setBlocksize(BLOCK_SIZE).setBlockCacheEnabled(cfCacheEnabled).build(); + HRegion region = TEST_UTIL.createTestRegion(TABLE, cfd); + CacheStats stats = BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE.getStats(); writeTestData(region); assertEquals(0, stats.getHitCount()); assertEquals(0, HFile.DATABLOCK_READ_COUNT.sum()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 7053fce..2a613de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -86,13 +86,14 @@ public class TestHFile { TEST_UTIL.getDataTestDir("TestHFile").toString(); private final int minBlockSize = 512; private static String localFormatter = "%010d"; - private static CacheConfig cacheConf = null; + private static CacheConfig cacheConf; private static Configuration conf ; private static FileSystem fs; @BeforeClass public static void setUp() throws Exception { conf = TEST_UTIL.getConfiguration(); + cacheConf = new CacheConfig(conf); fs = TEST_UTIL.getTestFileSystem(); } @@ -162,7 +163,6 @@ public class TestHFile { */ @Test public void testEmptyHFile() throws IOException { - if (cacheConf == null) cacheConf = new CacheConfig(conf); Path f = new Path(ROOT_DIR, testName.getMethodName()); HFileContext context = new HFileContextBuilder().withIncludesTags(false).build(); Writer w = @@ -179,7 +179,6 @@ public class TestHFile { */ @Test public void testCorrupt0LengthHFile() throws IOException { - if (cacheConf == null) cacheConf = new CacheConfig(conf); Path f = new Path(ROOT_DIR, testName.getMethodName()); FSDataOutputStream fsos = fs.create(f); fsos.close(); @@ -213,7 +212,6 @@ public class TestHFile { */ @Test public void testCorruptTruncatedHFile() throws IOException { - if (cacheConf == null) cacheConf = new CacheConfig(conf); Path f = new Path(ROOT_DIR, testName.getMethodName()); HFileContext context = new HFileContextBuilder().build(); Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f) @@ -315,7 +313,6 @@ public class TestHFile { if (useTags) { conf.setInt("hfile.format.version", 3); } - if (cacheConf == null) cacheConf = new CacheConfig(conf); Path ncHFile = new Path(ROOT_DIR, "basic.hfile." 
+ codec.toString() + useTags); FSDataOutputStream fout = createFSOutput(ncHFile); HFileContext meta = new HFileContextBuilder() @@ -411,7 +408,6 @@ public class TestHFile { } private void metablocks(final String compress) throws Exception { - if (cacheConf == null) cacheConf = new CacheConfig(conf); Path mFile = new Path(ROOT_DIR, "meta.hfile"); FSDataOutputStream fout = createFSOutput(mFile); HFileContext meta = new HFileContextBuilder() @@ -445,7 +441,6 @@ public class TestHFile { @Test public void testNullMetaBlocks() throws Exception { - if (cacheConf == null) cacheConf = new CacheConfig(conf); for (Compression.Algorithm compressAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) { Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index a588341..4de821c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -246,7 +246,7 @@ public class TestHFileBlock { @Test public void testNoCompression() throws IOException { CacheConfig cacheConf = Mockito.mock(CacheConfig.class); - Mockito.when(cacheConf.isBlockCacheEnabled()).thenReturn(false); + Mockito.when(cacheConf.getBlockCache()).thenReturn(null); HFileBlock block = createTestV2Block(NONE, includesMemstoreTS, false).getBlockForCaching(cacheConf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index efe76aa..90f619d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -530,8 +530,7 @@ public class TestHFileBlockIndex { conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); // should open hfile.block.index.cacheonwrite conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true); - CacheConfig.instantiateBlockCache(conf); - CacheConfig cacheConf = new CacheConfig(conf); + CacheConfig cacheConf = new CacheConfig(conf, BlockCacheFactory.createBlockCache(conf)); BlockCache blockCache = cacheConf.getBlockCache(); // Evict all blocks that were cached-on-write by the previous invocation. 
blockCache.evictBlocksByHfileName(hfilePath.getName()); @@ -589,8 +588,7 @@ public class TestHFileBlockIndex { public void testHFileWriterAndReader() throws IOException { Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "hfile_for_block_index"); - CacheConfig.instantiateBlockCache(conf); - CacheConfig cacheConf = new CacheConfig(conf); + CacheConfig cacheConf = new CacheConfig(conf, BlockCacheFactory.createBlockCache(conf)); BlockCache blockCache = cacheConf.getBlockCache(); for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; ++testI) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java index 4542a3c..c60f7b6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java @@ -82,13 +82,11 @@ public class TestLazyDataBlockDecompression { @Before public void setUp() throws IOException { - CacheConfig.clearGlobalInstances(); fs = FileSystem.get(TEST_UTIL.getConfiguration()); } @After public void tearDown() { - CacheConfig.clearGlobalInstances(); fs = null; } @@ -159,9 +157,8 @@ public class TestLazyDataBlockDecompression { lazyCompressDisabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, cacheOnWrite); lazyCompressDisabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, cacheOnWrite); lazyCompressDisabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, false); - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = - new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressDisabled); - CacheConfig cc = new CacheConfig(lazyCompressDisabled); + CacheConfig cc = new CacheConfig(lazyCompressDisabled, + new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressDisabled)); assertFalse(cc.shouldCacheDataCompressed()); assertTrue(cc.getBlockCache() instanceof LruBlockCache); LruBlockCache disabledBlockCache = (LruBlockCache) cc.getBlockCache(); @@ -194,9 +191,8 @@ public class TestLazyDataBlockDecompression { lazyCompressEnabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, cacheOnWrite); lazyCompressEnabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, cacheOnWrite); lazyCompressEnabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true); - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = - new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressEnabled); - cc = new CacheConfig(lazyCompressEnabled); + cc = new CacheConfig(lazyCompressEnabled, + new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressEnabled)); assertTrue("test improperly configured.", cc.shouldCacheDataCompressed()); assertTrue(cc.getBlockCache() instanceof LruBlockCache); LruBlockCache enabledBlockCache = (LruBlockCache) cc.getBlockCache(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 811df14..ce23589 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hbase.io.hfile; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import 
java.io.IOException; import java.util.Random; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -28,8 +30,9 @@ import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -57,24 +60,25 @@ public class TestPrefetch { private Configuration conf; private CacheConfig cacheConf; private FileSystem fs; + private BlockCache blockCache; @Before public void setUp() throws IOException { conf = TEST_UTIL.getConfiguration(); conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); fs = HFileSystem.get(conf); - CacheConfig.blockCacheDisabled = false; - CacheConfig.instantiateBlockCache(conf); - cacheConf = new CacheConfig(conf); + blockCache = BlockCacheFactory.createBlockCache(conf); + cacheConf = new CacheConfig(conf, blockCache); } @Test public void testPrefetchSetInHCDWorks() { - HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("f")); - hcd.setPrefetchBlocksOnOpen(true); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).setPrefetchBlocksOnOpen(true) + .build(); Configuration c = HBaseConfiguration.create(); assertFalse(c.getBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false)); - CacheConfig cc = new CacheConfig(c, hcd); + CacheConfig cc = new CacheConfig(c, columnFamilyDescriptor, blockCache); assertTrue(cc.shouldPrefetchOnOpen()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java index 18e8e70..4c5c5a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java @@ -88,7 +88,7 @@ public class TestScannerFromBucketCache { conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f); } tableName = TableName.valueOf(name.getMethodName()); - CacheConfig.instantiateBlockCache(conf); + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = BlockCacheFactory.createBlockCache(conf); } @After @@ -96,7 +96,6 @@ public class TestScannerFromBucketCache { EnvironmentEdgeManagerTestHelper.reset(); LOG.info("Cleaning test directory: " + test_util.getDataTestDir()); test_util.cleanupTestDir(); - CacheConfig.clearGlobalInstances(); } String getName() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java index d27b041..ed440e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java @@ -123,10 +123,7 @@ public class TestScannerSelectionUsingKeyRange { } Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz")); - 
CacheConfig.blockCacheDisabled = false; - CacheConfig.instantiateBlockCache(conf); - CacheConfig cacheConf = new CacheConfig(conf); - LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache(); + LruBlockCache cache = (LruBlockCache) BlockCacheFactory.createBlockCache(conf); cache.clearCache(); InternalScanner scanner = region.getScanner(scan); List results = new ArrayList<>(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 444102d..c3fcd41 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -28,12 +28,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; @@ -104,17 +106,16 @@ public class TestScannerSelectionUsingTTL { @Test public void testScannerSelection() throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); - CacheConfig.instantiateBlockCache(conf); conf.setBoolean("hbase.store.delete.expired.storefile", false); - HColumnDescriptor hcd = - new HColumnDescriptor(FAMILY_BYTES) - .setMaxVersions(Integer.MAX_VALUE) - .setTimeToLive(TTL_SECONDS); - HTableDescriptor htd = new HTableDescriptor(TABLE); - htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(TABLE); - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, - TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd); + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = BlockCacheFactory.createBlockCache(conf); + LruBlockCache cache = (LruBlockCache) BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE; + + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES).setMaxVersions(Integer.MAX_VALUE) + .setTimeToLive(TTL_SECONDS).build()).build(); + RegionInfo info = RegionInfoBuilder.newBuilder(TABLE).build(); + HRegion region = HBaseTestingUtility + .createRegionAndWAL(info, TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, td); long ts = EnvironmentEdgeManager.currentTime(); long version = 0; //make sure each new set of Put's have a new ts @@ -136,10 +137,7 @@ public class TestScannerSelectionUsingTTL { version++; } - Scan scan = new Scan(); - scan.setMaxVersions(Integer.MAX_VALUE); - CacheConfig cacheConf = new CacheConfig(conf); - LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache(); + Scan scan = new Scan().readVersions(Integer.MAX_VALUE); cache.clearCache(); InternalScanner scanner = region.getScanner(scan); List results = new 
ArrayList<>(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index f4c2a33..b466515 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -48,8 +48,10 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.locking.EntityLock; import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.quotas.RegionSizeStore; @@ -714,4 +716,14 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, public TableDescriptors getTableDescriptors() { return null; } + + @Override + public BlockCache getBlockCache() { + return null; + } + + @Override + public MobFileCache getMobFileCache() { + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNotCarryTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNotCarryTable.java index 75d9019..a056c87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNotCarryTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNotCarryTable.java @@ -79,7 +79,7 @@ public class TestMasterNotCarryTable { @Test public void testMasterBlockCache() { // no need to instantiate block cache. 
- assertNull(master.getCacheConfig().getBlockCache()); + assertNull(master.getBlockCache()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java index 325a580..bd0356f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java @@ -58,7 +58,6 @@ public class TestMobFileCache { private HRegion region; private Configuration conf; private MobCacheConfig mobCacheConf; - private MobFileCache mobFileCache; private Date currentDate = new Date(); private static final String TEST_CACHE_SIZE = "2"; private static final int EXPECTED_CACHE_SIZE_ZERO = 0; @@ -85,6 +84,8 @@ public class TestMobFileCache { public void setUp() throws Exception { UTIL = HBaseTestingUtility.createLocalHTU(); conf = UTIL.getConfiguration(); + conf.set(MobConstants.MOB_FILE_CACHE_SIZE_KEY, TEST_CACHE_SIZE); + MobFileCache.GLOBAL_MOB_FILE_CACHE_INSTANCE = new MobFileCache(conf); HTableDescriptor htd = UTIL.createTableDescriptor("testMobFileCache"); HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILY1); hcd1.setMobEnabled(true); @@ -121,7 +122,7 @@ public class TestMobFileCache { HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setMaxVersions(4); hcd.setMobEnabled(true); - mobCacheConf = new MobCacheConfig(conf, hcd); + mobCacheConf = new MobCacheConfig(conf, hcd, null, MobFileCache.GLOBAL_MOB_FILE_CACHE_INSTANCE); return createMobStoreFile(hcd); } @@ -158,12 +159,11 @@ public class TestMobFileCache { @Test public void testMobFileCache() throws Exception { FileSystem fs = FileSystem.get(conf); - conf.set(MobConstants.MOB_FILE_CACHE_SIZE_KEY, TEST_CACHE_SIZE); - mobFileCache = new MobFileCache(conf); Path file1Path = createMobStoreFile(FAMILY1); Path file2Path = createMobStoreFile(FAMILY2); Path file3Path = createMobStoreFile(FAMILY3); + MobFileCache mobFileCache = MobFileCache.GLOBAL_MOB_FILE_CACHE_INSTANCE; // Before open one file by the MobFileCache assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize()); // Open one file by the MobFileCache diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index 2cf3f8c..bf71dbc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -30,17 +30,17 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -66,7 +66,6 @@ public class TestBlocksRead { static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL, BloomType.ROW, BloomType.NONE }; - private static BlockCache blockCache; HRegion region = null; private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final String DIR = TEST_UTIL.getDataTestDir("TestBlocksRead").toString(); @@ -76,7 +75,6 @@ public class TestBlocksRead { public static void setUp() throws Exception { // disable compactions in this test. TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10000); - CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration()); } @AfterClass @@ -93,22 +91,20 @@ public class TestBlocksRead { * @throws IOException * @return created and initialized region. */ - private HRegion initHRegion(byte[] tableName, String callingMethod, - Configuration conf, String family) throws IOException { - HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); - HColumnDescriptor familyDesc; + private HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, + String family) throws IOException { + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = BlockCacheFactory.createBlockCache(conf); + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); for (int i = 0; i < BLOOM_TYPE.length; i++) { BloomType bloomType = BLOOM_TYPE[i]; - familyDesc = new HColumnDescriptor(family + "_" + bloomType) - .setBlocksize(1) - .setBloomFilterType(BLOOM_TYPE[i]); - htd.addFamily(familyDesc); + builder.setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family + "_" + bloomType)) + .setBlocksize(1).setBloomFilterType(bloomType).build()); } - - HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); + RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); Path path = new Path(DIR + callingMethod); - HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); - blockCache = new CacheConfig(conf).getBlockCache(); + HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, builder.build()); return r; } @@ -205,7 +201,7 @@ public class TestBlocksRead { } private static long getBlkCount() { - return blockCache.getBlockCount(); + return BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE.getBlockCount(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java index 0ba4e97..56533ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import 
org.apache.hadoop.hbase.testclassification.SmallTests; @@ -62,7 +62,8 @@ public class TestBlocksScanned extends HBaseTestCase { public void setUp() throws Exception { super.setUp(); TEST_UTIL = new HBaseTestingUtility(); - CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration()); + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = + BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration()); } @Test @@ -101,7 +102,7 @@ public class TestBlocksScanned extends HBaseTestCase { addContent(r, FAMILY, COL); r.flush(true); - CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats(); + CacheStats stats = BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE.getStats(); long before = stats.getHitCount() + stats.getMissCount(); // Do simple test of getting one row only first. Scan scan = new Scan().withStartRow(Bytes.toBytes("aaa")).withStopRow(Bytes.toBytes("aaz")) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index dc51dae..71ae8ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -161,8 +162,8 @@ public class TestCacheOnWriteInSchema { conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false); conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false); conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false); - CacheConfig.instantiateBlockCache(conf); + BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = BlockCacheFactory.createBlockCache(conf); fs = HFileSystem.get(conf); // Create the schema diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java index 47fc2d7..b85cc21 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java @@ -100,8 +100,8 @@ public class TestClearRegionBlockCache { @Test public void testClearBlockCache() throws Exception { - BlockCache blockCache1 = rs1.getCacheConfig().getBlockCache(); - BlockCache blockCache2 = rs2.getCacheConfig().getBlockCache(); + BlockCache blockCache1 = rs1.getBlockCache(); + BlockCache blockCache2 = rs2.getBlockCache(); long initialBlockCount1 = blockCache1.getBlockCount(); long initialBlockCount2 = blockCache2.getBlockCount(); @@ -125,23 +125,24 @@ public class TestClearRegionBlockCache { public void testClearBlockCacheFromAdmin() throws Exception { Admin admin = HTU.getAdmin(); - // All RS run in a same process, so the block cache is same for rs1 and rs2 - BlockCache blockCache = rs1.getCacheConfig().getBlockCache(); - long initialBlockCount = blockCache.getBlockCount(); + BlockCache blockCache1 = rs1.getBlockCache(); + BlockCache 
blockCache2 = rs2.getBlockCache(); + long initialBlockCount1 = blockCache1.getBlockCount(); + long initialBlockCount2 = blockCache2.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); - assertEquals(blockCache.getBlockCount() - initialBlockCount, - HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); + assertEquals(blockCache1.getBlockCount() - initialBlockCount1, + HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); scanAllRegionsForRS(rs2); - assertEquals(blockCache.getBlockCount() - initialBlockCount, - HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) - + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); + assertEquals(blockCache2.getBlockCount() - initialBlockCount2, + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME); assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); - assertEquals(initialBlockCount, blockCache.getBlockCount()); + assertEquals(initialBlockCount1, blockCache1.getBlockCount()); + assertEquals(initialBlockCount2, blockCache2.getBlockCount()); } @Test @@ -149,23 +150,24 @@ public class TestClearRegionBlockCache { AsyncAdmin admin = ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get().getAdmin(); - // All RS run in a same process, so the block cache is same for rs1 and rs2 - BlockCache blockCache = rs1.getCacheConfig().getBlockCache(); - long initialBlockCount = blockCache.getBlockCount(); + BlockCache blockCache1 = rs1.getBlockCache(); + BlockCache blockCache2 = rs2.getBlockCache(); + long initialBlockCount1 = blockCache1.getBlockCount(); + long initialBlockCount2 = blockCache2.getBlockCount(); // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); - assertEquals(blockCache.getBlockCount() - initialBlockCount, - HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); + assertEquals(blockCache1.getBlockCount() - initialBlockCount1, + HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); scanAllRegionsForRS(rs2); - assertEquals(blockCache.getBlockCount() - initialBlockCount, - HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) - + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); + assertEquals(blockCache2.getBlockCount() - initialBlockCount2, + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get(); - assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) - + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); - assertEquals(initialBlockCount, blockCache.getBlockCount()); + assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU + .getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); + assertEquals(initialBlockCount1, blockCache1.getBlockCount()); + assertEquals(initialBlockCount2, blockCache2.getBlockCount()); } private void scanAllRegionsForRS(HRegionServer rs) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index 4c2d645..584d3eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; 
import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -40,7 +39,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter; import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter; @@ -122,7 +120,6 @@ public class TestCompoundBloomFilter { private static Configuration conf; private static CacheConfig cacheConf; private FileSystem fs; - private BlockCache blockCache; /** A message of the form "in test#<number>:" to include in logging. */ private String testIdMsg; @@ -138,11 +135,7 @@ public class TestCompoundBloomFilter { conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION); fs = FileSystem.get(conf); - - CacheConfig.instantiateBlockCache(conf); cacheConf = new CacheConfig(conf); - blockCache = cacheConf.getBlockCache(); - assertNotNull(blockCache); } private List createSortedKeyValues(Random rand, int n) { @@ -373,7 +366,5 @@ public class TestCompoundBloomFilter { rowColKV.getRowLength())); assertEquals(0, rowKV.getQualifierLength()); } - - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index 9c5a667..32d41bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -32,6 +32,7 @@ import java.util.NavigableSet; import java.util.Optional; import java.util.concurrent.ConcurrentSkipListSet; import javax.crypto.spec.SecretKeySpec; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -42,20 +43,24 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.mob.MobConstants; +import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; @@ -90,7 +95,6 @@ public class TestHMobStore { private HMobStore store; 
private HRegion region; - private HColumnDescriptor hcd; private FileSystem fs; private byte [] table = Bytes.toBytes("table"); private byte [] family = Bytes.toBytes("family"); @@ -131,49 +135,44 @@ public class TestHMobStore { byte [] next = iter.next(); expected.add(new KeyValue(row, family, next, 1, value)); get.addColumn(family, next); - get.setMaxVersions(); // all versions. + get.readAllVersions(); } } - private void init(String methodName, Configuration conf, boolean testStore) - throws IOException { - hcd = new HColumnDescriptor(family); - hcd.setMobEnabled(true); - hcd.setMobThreshold(3L); - hcd.setMaxVersions(4); - init(methodName, conf, hcd, testStore); + private void init(String methodName, Configuration conf, boolean testStore) throws IOException { + ColumnFamilyDescriptor cfd = + ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true).setMobThreshold(3L) + .setMaxVersions(4).build(); + init(methodName, conf, cfd, testStore); } - private void init(String methodName, Configuration conf, - HColumnDescriptor hcd, boolean testStore) throws IOException { - HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); - init(methodName, conf, htd, hcd, testStore); - } + private void init(String methodName, Configuration conf, ColumnFamilyDescriptor cfd, + boolean testStore) throws IOException { + TableDescriptor td = + TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setColumnFamily(cfd).build(); - private void init(String methodName, Configuration conf, HTableDescriptor htd, - HColumnDescriptor hcd, boolean testStore) throws IOException { //Setting up tje Region and Store - Path basedir = new Path(DIR+methodName); - Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName()); + Path basedir = new Path(DIR + methodName); + Path tableDir = FSUtils.getTableDir(basedir, td.getTableName()); String logName = "logs"; Path logdir = new Path(basedir, logName); FileSystem fs = FileSystem.get(conf); fs.delete(logdir, true); - htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); + RegionInfo info = RegionInfoBuilder.newBuilder(td.getTableName()).build(); ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null); final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, basedir); final WALFactory wals = new WALFactory(walConf, methodName); - region = new HRegion(tableDir, wals.getWAL(info), fs, conf, info, htd, null); - store = new HMobStore(region, hcd, conf); - if(testStore) { - init(conf, hcd); + region = new HRegion(tableDir, wals.getWAL(info), fs, conf, info, td, null); + MobFileCache.GLOBAL_MOB_FILE_CACHE_INSTANCE = new MobFileCache(conf); + store = new HMobStore(region, cfd, conf); + if (testStore) { + init(conf, cfd); } } - private void init(Configuration conf, HColumnDescriptor hcd) + private void init(Configuration conf, ColumnFamilyDescriptor cfd) throws IOException { Path basedir = FSUtils.getRootDir(conf); fs = FileSystem.get(conf); @@ -187,7 +186,7 @@ public class TestHMobStore { KeyValue[] keys = new KeyValue[] { key1, key2, key3 }; int maxKeyCount = keys.length; StoreFileWriter mobWriter = store.createWriterInTmp(currentDate, maxKeyCount, - hcd.getCompactionCompressionType(), region.getRegionInfo().getStartKey(), false); + cfd.getCompactionCompressionType(), region.getRegionInfo().getStartKey(), false); mobFilePath = mobWriter.getPath(); mobWriter.append(key1); @@ -382,15 +381,11 @@ public class TestHMobStore { */ @Test public void 
testMobCellSizeThreshold() throws IOException { - final Configuration conf = HBaseConfiguration.create(); - - HColumnDescriptor hcd; - hcd = new HColumnDescriptor(family); - hcd.setMobEnabled(true); - hcd.setMobThreshold(100); - hcd.setMaxVersions(4); - init(name.getMethodName(), conf, hcd, false); + ColumnFamilyDescriptor cfd = + ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true).setMobThreshold(100) + .setMaxVersions(4).build(); + init(name.getMethodName(), conf, cfd, false); //Put data in memstore this.store.add(new KeyValue(row, family, qf1, 1, value), null); @@ -503,15 +498,12 @@ public class TestHMobStore { String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key cfKey = new SecretKeySpec(keyBytes, algorithm); - HColumnDescriptor hcd = new HColumnDescriptor(family); - hcd.setMobEnabled(true); - hcd.setMobThreshold(100); - hcd.setMaxVersions(4); - hcd.setEncryptionType(algorithm); - hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),cfKey)); - - init(name.getMethodName(), conf, hcd, false); + ColumnFamilyDescriptor cfd = + ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true).setMobThreshold(100) + .setMaxVersions(4).setEncryptionType(algorithm).setEncryptionKey(EncryptionUtil + .wrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, + User.getCurrent().getShortName()), cfKey)).build(); + init(name.getMethodName(), conf, cfd, false); this.store.add(new KeyValue(row, family, qf1, 1, value), null); this.store.add(new KeyValue(row, family, qf2, 1, value), null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index 30ee3b2..ac63974 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -91,7 +92,7 @@ public class TestHStoreFile extends HBaseTestCase { private static final Logger LOG = LoggerFactory.getLogger(TestHStoreFile.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); + private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); private static String ROOT_DIR = TEST_UTIL.getDataTestDir("TestStoreFile").toString(); private static final ChecksumType CKTYPE = ChecksumType.CRC32C; private static final int CKBYTES = 512; @@ -934,8 +935,7 @@ public class TestHStoreFile extends HBaseTestCase { Path baseDir = new Path(new Path(testDir, "7e0102"),"twoCOWEOC"); // Grab the block cache and get the initial hit/miss counts - CacheConfig.instantiateBlockCache(conf); - BlockCache bc = new CacheConfig(conf).getBlockCache(); + BlockCache bc = BlockCacheFactory.createBlockCache(conf); assertNotNull(bc); CacheStats cs = bc.getStats(); long startHit = cs.getHitCount(); @@ -944,7 +944,7 @@ public class 
TestHStoreFile extends HBaseTestCase { // Let's write a StoreFile with three blocks, with cache on write off conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false); - CacheConfig cacheConf = new CacheConfig(conf); + CacheConfig cacheConf = new CacheConfig(conf, bc); Path pathCowOff = new Path(baseDir, "123456789"); StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3); HStoreFile hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, @@ -967,7 +967,7 @@ public class TestHStoreFile extends HBaseTestCase { // Now write a StoreFile with three blocks, with cache on write on conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); - cacheConf = new CacheConfig(conf); + cacheConf = new CacheConfig(conf, bc); Path pathCowOn = new Path(baseDir, "123456788"); writer = writeStoreFile(conf, cacheConf, pathCowOn, 3); hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, @@ -1025,7 +1025,7 @@ public class TestHStoreFile extends HBaseTestCase { // Let's close the first file with evict on close turned on conf.setBoolean("hbase.rs.evictblocksonclose", true); - cacheConf = new CacheConfig(conf); + cacheConf = new CacheConfig(conf, bc); hsf = new HStoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE, true); hsf.initReader(); reader = hsf.getReader(); @@ -1039,7 +1039,7 @@ public class TestHStoreFile extends HBaseTestCase { // Let's close the second file with evict on close turned off conf.setBoolean("hbase.rs.evictblocksonclose", false); - cacheConf = new CacheConfig(conf); + cacheConf = new CacheConfig(conf, bc); hsf = new HStoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE, true); hsf.initReader(); reader = hsf.getReader(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java index cc9e385..974a05e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java @@ -31,6 +31,7 @@ import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -58,14 +59,13 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.mob.MobConstants; +import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -101,16 +101,6 @@ public class TestMobStoreCompaction { private final byte[] STARTROW = Bytes.toBytes(START_KEY); private int compactionThreshold; - @BeforeClass - public static void setUpBeforeClass() throws Exception { - UTIL.startMiniCluster(1); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - UTIL.shutdownMiniCluster(); - } - private void init(Configuration conf, long mobThreshold) throws Exception { this.conf = conf; this.mobCellThreshold = mobThreshold; @@ 
-124,6 +114,7 @@ public class TestMobStoreCompaction { hcd.setMaxVersions(1); htd.modifyFamily(hcd); + MobFileCache.GLOBAL_MOB_FILE_CACHE_INSTANCE = new MobFileCache(conf); region = UTIL.createLocalHRegion(htd, null, null); fs = FileSystem.get(conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java index 17b01b5..0430bf0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.ipc.MetricsHBaseServer; import org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapperStub; import org.apache.hadoop.hbase.ipc.RpcServerInterface; @@ -102,11 +101,9 @@ public class TestRSStatusServlet { Mockito.doReturn("fakequorum").when(zkw).getQuorum(); Mockito.doReturn(zkw).when(rs).getZooKeeper(); - // Fake CacheConfig + // Fake BlockCache LOG.warn("The " + HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " is set to 0"); - CacheConfig cacheConf = Mockito.mock(CacheConfig.class); - Mockito.doReturn(null).when(cacheConf).getBlockCache(); - Mockito.doReturn(cacheConf).when(rs).getCacheConfig(); + Mockito.doReturn(null).when(rs).getBlockCache(); // Fake MasterAddressTracker MasterAddressTracker mat = Mockito.mock(MasterAddressTracker.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index 543126e..34f6ca1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -81,7 +80,6 @@ public class TestRecoveredEdits { @Test public void testReplayWorksThoughLotsOfFlushing() throws IOException { - CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration()); for(MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) { testReplayWorksWithMemoryCompactionPolicy(policy); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java index 61de21f..647f450 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java @@ -68,7 +68,7 @@ public class TestRowPrefixBloomFilter { private static final Logger LOG = LoggerFactory.getLogger(TestRowPrefixBloomFilter.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private CacheConfig cacheConf = new 
CacheConfig(TEST_UTIL.getConfiguration()); + private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); private static final ChecksumType CKTYPE = ChecksumType.CRC32C; private static final int CKBYTES = 512; private boolean localfs = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java index 75ebfd3..eb25806 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java @@ -214,7 +214,7 @@ public class TestSecureBulkLoadManager { ColumnFamilyDescriptor family = desc.getColumnFamily(FAMILY); Compression.Algorithm compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; - CacheConfig writerCacheConf = new CacheConfig(conf, family); + CacheConfig writerCacheConf = new CacheConfig(conf, family, null); writerCacheConf.setCacheDataOnWrite(false); HFileContext hFileContext = new HFileContextBuilder() .withIncludesMvcc(false) -- 2.7.4
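
The test changes above all converge on the same wiring pattern, so a compact illustration may help while reviewing them. The sketch below is not part of the patch; it only combines calls that already appear in these diffs (BlockCacheFactory.createBlockCache, the BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE field, the CacheConfig(Configuration, BlockCache) constructor, and the BlockCache stats accessors). The class name is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;

public class BlockCacheWiringSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Before this patch: CacheConfig.instantiateBlockCache(conf) populated a process-wide
    // cache, and new CacheConfig(conf).getBlockCache() handed it back.

    // After this patch: the cache is built explicitly by the factory...
    BlockCache bc = BlockCacheFactory.createBlockCache(conf);

    // ...tests that still need a process-wide instance publish it themselves...
    BlockCacheFactory.GLOBAL_BLOCK_CACHE_INSTANCE = bc;

    // ...and CacheConfig wraps an existing cache instead of owning one.
    CacheConfig cacheConf = new CacheConfig(conf, bc);

    // Cache statistics are read from the BlockCache directly, not through CacheConfig.
    CacheStats stats = bc.getStats();
    System.out.println("blocks=" + bc.getBlockCount()
        + " hits+misses=" + (stats.getHitCount() + stats.getMissCount()));
  }
}

The same idea explains the per-server assertions in TestClearRegionBlockCache: each region server now exposes its own cache via getBlockCache(), so block counts are checked per server rather than against one shared cache.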
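The mob tests (TestHMobStore, TestMobStoreCompaction) follow an analogous setup for the mob file cache. Again only a sketch, assuming nothing beyond the MobFileCache(Configuration) constructor and the GLOBAL_MOB_FILE_CACHE_INSTANCE field used in those diffs; the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mob.MobFileCache;

public class MobFileCacheWiringSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Install the mob file cache before creating any HRegion/HMobStore that reads it,
    // mirroring the init() setup in the updated mob tests.
    MobFileCache.GLOBAL_MOB_FILE_CACHE_INSTANCE = new MobFileCache(conf);
  }
}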