diff --git a/conf/hbase-env.cmd b/conf/hbase-env.cmd
index 8c8597e..d293f4d 100644
--- a/conf/hbase-env.cmd
+++ b/conf/hbase-env.cmd
@@ -29,6 +29,13 @@
@rem Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
@rem offheap, set the value to "8G".
+@rem HBase by default enables the off heap Bucket Cache with
+@rem a size of 40% of the max heap size. Consider that as well while setting the value below. Also
+@rem HBase allocates an off heap ByteBuffer pool at every server. This consists of buffers
+@rem with a default size of 64 KB each, and up to 1920 buffers by default. All of these are configurable.
+@rem Refer to the configs 'hbase.ipc.server.reservoir.enabled',
+@rem 'hbase.ipc.server.reservoir.initial.buffer.size',
+@rem 'hbase.ipc.server.reservoir.initial.max', 'hbase.regionserver.handler.count'.
@rem set HBASE_OFFHEAPSIZE=1000
@rem For example, to allocate 8G of offheap, to 8G:
diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh
index d9879c6..290cdf3 100644
--- a/conf/hbase-env.sh
+++ b/conf/hbase-env.sh
@@ -35,6 +35,13 @@
# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
# offheap, set the value to "8G".
+# HBase by default enables the off heap Bucket Cache with
+# a size of 40% of the max heap size. Consider that as well while setting the value below. Also
+# HBase allocates an off heap ByteBuffer pool at every server. This consists of buffers
+# with a default size of 64 KB each, and up to 1920 buffers by default. All of these are configurable.
+# Refer to the configs 'hbase.ipc.server.reservoir.enabled',
+# 'hbase.ipc.server.reservoir.initial.buffer.size',
+# 'hbase.ipc.server.reservoir.initial.max', 'hbase.regionserver.handler.count'.
# export HBASE_OFFHEAPSIZE=1G
# Extra Java runtime options.
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 14ce089..f7755c0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1224,9 +1224,8 @@ public final class HConstants {
// hbase-common?
/**
- * Current ioengine options in include: heap, offheap and file:PATH (where PATH is the path
- * to the file that will host the file-based cache. See BucketCache#getIOEngineFromName() for
- * list of supported ioengine options.
+ * Current ioengine options include: offheap, file, files or mmap.
+ * See BucketCache#getIOEngineFromName() for list of supported ioengine options.
*
Set this option and a non-zero {@link #BUCKET_CACHE_SIZE_KEY} to enable bucket cache.
*/
public static final String BUCKET_CACHE_IOENGINE_KEY = "hbase.bucketcache.ioengine";
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 7995c41..ac47188 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -893,7 +893,7 @@ possible configurations would overwhelm and obscure the important.
hbase.bucketcache.ioengine
-
+ offheap
Where to store the contents of the bucketcache. One of: offheap,
file, files or mmap. If a file or files, set it to file(s):PATH_TO_FILE.
mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
@@ -903,9 +903,12 @@ possible configurations would overwhelm and obscure the important.
hbase.bucketcache.size
- A float that EITHER represents a percentage of total heap memory
- size to give to the cache (if < 1.0) OR, it is the total capacity in
- megabytes of BucketCache. Default: 0.0
+    A float value for the total capacity (in megabytes) of the BucketCache.
+    Default: the offheap BucketCache is enabled by default and its size is calculated
+    as 40% of the max heap memory size of the server.
+    Note: to disable the BucketCache, configure this as 0. Leaving this config unset
+    means the default is used, i.e. 40% of the Xmx size.
+
hbase.bucketcache.bucket.sizes
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index a071fbd..f9a0cd9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.io.hfile;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
-import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
import java.io.IOException;
@@ -123,7 +122,9 @@ public class CacheConfig {
public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3;
public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64;
- /**
+ public static final String DEFAULT_BUCKET_CACHE_IOENGINE = "offheap";
+
+ /**
* Configuration key to prefetch all blocks of a given file into the block cache
* when the file is opened.
*/
@@ -217,7 +218,7 @@ public class CacheConfig {
* @param family column family configuration
*/
public CacheConfig(Configuration conf, ColumnFamilyDescriptor family) {
- this(CacheConfig.instantiateBlockCache(conf),
+ this(getOrInstantiateBlockCache(conf),
conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ)
&& family.isBlockCacheEnabled(),
family.isInMemory(),
@@ -247,7 +248,7 @@ public class CacheConfig {
* @param conf hbase configuration
*/
public CacheConfig(Configuration conf) {
- this(CacheConfig.instantiateBlockCache(conf),
+ this(getOrInstantiateBlockCache(conf),
conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ),
DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set
// strictly from conf
@@ -586,16 +587,15 @@ public class CacheConfig {
}
@VisibleForTesting
- static BucketCache getBucketCache(Configuration c) {
+ static BucketCache getBucketCache(Configuration c, boolean atMaster) {
// Check for L2. ioengine name must be non-null.
- String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
- if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null;
+ String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY,
+ DEFAULT_BUCKET_CACHE_IOENGINE);
int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
- final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c);
+ final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c, atMaster);
if (bucketCacheSize <= 0) {
- throw new IllegalStateException("bucketCacheSize <= 0; Check " +
- BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size");
+ return null;
}
if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) {
LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer "
@@ -639,6 +639,16 @@ public class CacheConfig {
return bucketCache;
}
+ private static synchronized BlockCache getOrInstantiateBlockCache(Configuration conf) {
+ if (GLOBAL_BLOCK_CACHE_INSTANCE != null) {
+ return GLOBAL_BLOCK_CACHE_INSTANCE;
+ }
+ if (blockCacheDisabled) {
+ return null;
+ }
+ return instantiateBlockCache(conf, false);
+ }
+
/**
* Returns the block cache or null in case none should be used.
* Sets GLOBAL_BLOCK_CACHE_INSTANCE
@@ -646,7 +656,8 @@ public class CacheConfig {
* @param conf The current configuration.
* @return The block cache or null.
*/
- public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
+ public static synchronized BlockCache instantiateBlockCache(Configuration conf,
+ boolean atMaster) {
if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
if (blockCacheDisabled) return null;
LruBlockCache onHeapCache = getOnHeapCacheInternal(conf);
@@ -660,7 +671,7 @@ public class CacheConfig {
: new InclusiveCombinedBlockCache(onHeapCache, L2_CACHE_INSTANCE);
} else {
// otherwise use the bucket cache.
- L2_CACHE_INSTANCE = getBucketCache(conf);
+ L2_CACHE_INSTANCE = getBucketCache(conf, atMaster);
if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) {
// Non combined mode is off from 2.0
LOG.warn(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index ac842f6..3b940dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -546,7 +546,7 @@ public class BucketCache implements BlockCache, HeapSize {
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
- return evictBlock(cacheKey, true);
+ return evictBlock(cacheKey, false);
}
// does not check for the ref count. Just tries to evict it if found in the
@@ -559,7 +559,6 @@ public class BucketCache implements BlockCache, HeapSize {
BucketEntry bucketEntry = backingMap.get(cacheKey);
if (bucketEntry == null) {
if (removedBlock != null) {
- cacheStats.evicted(0, cacheKey.isPrimary());
return true;
} else {
return false;
@@ -576,7 +575,6 @@ public class BucketCache implements BlockCache, HeapSize {
} finally {
lock.writeLock().unlock();
}
- cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
return true;
}
@@ -589,7 +587,7 @@ public class BucketCache implements BlockCache, HeapSize {
return removedBlock;
}
- public boolean evictBlock(BlockCacheKey cacheKey, boolean deletedBlock) {
+ public boolean evictBlock(BlockCacheKey cacheKey, boolean evictedByEvictionProcess) {
if (!cacheEnabled) {
return false;
}
@@ -597,7 +595,9 @@ public class BucketCache implements BlockCache, HeapSize {
BucketEntry bucketEntry = backingMap.get(cacheKey);
if (bucketEntry == null) {
if (removedBlock != null) {
- cacheStats.evicted(0, cacheKey.isPrimary());
+ if (evictedByEvictionProcess) {
+ cacheStats.evicted(0, cacheKey.isPrimary());
+ }
return true;
} else {
return false;
@@ -607,14 +607,14 @@ public class BucketCache implements BlockCache, HeapSize {
try {
lock.writeLock().lock();
int refCount = bucketEntry.refCount.get();
- if(refCount == 0) {
+ if (refCount == 0) {
if (backingMap.remove(cacheKey, bucketEntry)) {
blockEvicted(cacheKey, bucketEntry, removedBlock == null);
} else {
return false;
}
} else {
- if(!deletedBlock) {
+ if (evictedByEvictionProcess) {
if (LOG.isDebugEnabled()) {
LOG.debug("This block " + cacheKey + " is still referred by " + refCount
+ " readers. Can not be freed now");
@@ -632,7 +632,9 @@ public class BucketCache implements BlockCache, HeapSize {
} finally {
lock.writeLock().unlock();
}
- cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
+ if (evictedByEvictionProcess) {
+ cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
+ }
return true;
}
@@ -734,7 +736,7 @@ public class BucketCache implements BlockCache, HeapSize {
for (Map.Entry entry : backingMap.entrySet()) {
if (candidateBuckets.contains(bucketAllocator
.getBucketIndex(entry.getValue().offset()))) {
- evictBlock(entry.getKey(), false);
+ evictBlock(entry.getKey(), true);
}
}
}
@@ -1367,7 +1369,7 @@ public class BucketCache implements BlockCache, HeapSize {
// TODO avoid a cycling siutation. We find no block which is not in use and so no way to free
// What to do then? Caching attempt fail? Need some changes in cacheBlock API?
while ((entry = queue.pollLast()) != null) {
- if (evictBlock(entry.getKey(), false)) {
+ if (evictBlock(entry.getKey(), true)) {
freedBytes += entry.getValue().getLength();
}
if (freedBytes >= toFree) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index 1f2025278..5f1e36f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -55,6 +55,9 @@ public class MemorySizeUtil {
// a constant to convert a fraction to a percentage
private static final int CONVERT_TO_PERCENTAGE = 100;
+ private static final float DEFAULT_OFFHEAP_BUCKET_CACHE_SIZE_PERCENT = 0.4F;
+ private static final long ONE_MB = 1024 * 1024;
+
private static final String JVM_HEAP_EXCEPTION = "Got an exception while attempting to read " +
"information about the JVM heap. Please submit this log information in a bug report and " +
"include your JVM settings, specifically the GC in use and any -XX options. Consider " +
@@ -234,17 +237,30 @@ public class MemorySizeUtil {
}
/**
- * @param conf used to read config for bucket cache size. (< 1 is treated as % and > is treated as MiB)
- * @return the number of bytes to use for bucket cache, negative if disabled.
+ * @param conf used to read config for bucket cache size.
+ * @return the number of bytes to use for bucket cache, negative or 0 if disabled.
*/
- public static long getBucketCacheSize(final Configuration conf) {
- // Size configured in MBs
- float bucketCacheSize = conf.getFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
- if (bucketCacheSize < 1) {
- throw new IllegalArgumentException("Bucket Cache should be minimum 1 MB in size."
- + "Configure 'hbase.bucketcache.size' with > 1 value");
+ public static long getBucketCacheSize(final Configuration conf, boolean atMaster) {
+ float bucketCacheSize = 0.0F;
+ String configuredBCSize = conf.get(HConstants.BUCKET_CACHE_SIZE_KEY);
+ if (atMaster && (configuredBCSize == null || configuredBCSize.isEmpty())) {
+      // On the HMaster (HM) side, the BucketCache (BC) is not ON by default.
+ return (long) (bucketCacheSize);
+ }
+ if (configuredBCSize == null || configuredBCSize.isEmpty()) {
+      // Nothing has been configured by the user. The default we take is 40% of the Xmx size.
+ final MemoryUsage usage = safeGetHeapMemoryUsage();
+ if (usage != null) {
+ long xmx = usage.getMax();
+ bucketCacheSize = xmx * DEFAULT_OFFHEAP_BUCKET_CACHE_SIZE_PERCENT;
+ LOG.info("'hbase.bucketcache.size' is not configured. So going with the default ie. "
+ + (DEFAULT_OFFHEAP_BUCKET_CACHE_SIZE_PERCENT * 100) + "% of Xmx = " + bucketCacheSize);
+ }
+ } else {
+ // Size configured in MBs
+ bucketCacheSize = Float.valueOf(configuredBCSize) * ONE_MB;
}
- return (long) (bucketCacheSize * 1024 * 1024);
+ return (long) (bucketCacheSize);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1c57620..db69ba8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
import org.apache.hadoop.hbase.http.InfoServer;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
@@ -125,6 +126,7 @@ import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationManager;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobCacheConfig;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -537,6 +539,13 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
+ protected void initCacheConfigs(Configuration conf) {
+ CacheConfig.instantiateBlockCache(conf, true);
+ cacheConfig = new CacheConfig(conf);
+    // TODO: no need to create this cache on the HM side. Check for side effects, if any, and handle them.
+ mobCacheConfig = new MobCacheConfig(conf);
+ }
+
// Main run loop. Calls through to the regionserver run loop.
@Override
public void run() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index cb0632d..3decbfe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -402,7 +402,7 @@ public class HRegionServer extends HasThread implements
// Cache configuration and block cache reference
protected CacheConfig cacheConfig;
// Cache configuration for mob
- final MobCacheConfig mobCacheConfig;
+ protected MobCacheConfig mobCacheConfig;
/** The health check chore. */
private HealthCheckChore healthCheckChore;
@@ -598,8 +598,7 @@ public class HRegionServer extends HasThread implements
Superusers.initialize(conf);
regionServerAccounting = new RegionServerAccounting(conf);
- cacheConfig = new CacheConfig(conf);
- mobCacheConfig = new MobCacheConfig(conf);
+ initCacheConfigs(conf);
uncaughtExceptionHandler = new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
@@ -647,6 +646,12 @@ public class HRegionServer extends HasThread implements
}
}
+ protected void initCacheConfigs(Configuration conf) {
+ CacheConfig.instantiateBlockCache(conf, false);
+ cacheConfig = new CacheConfig(conf);
+ mobCacheConfig = new MobCacheConfig(conf);
+ }
+
/**
* If running on Windows, do windows-specific setup.
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
index 5c1baca..23dc1cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -103,11 +104,12 @@ public class TestEncodedSeekers {
public void testEncodedSeeker() throws IOException {
System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
+ includeTags + ", compressTags : " + compressTags);
+ Configuration conf = new Configuration(testUtil.getConfiguration());
+ conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
if(includeTags) {
- testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
+ conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
}
- LruBlockCache cache =
- (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
+ LruBlockCache cache = (LruBlockCache) new CacheConfig(conf).getBlockCache();
cache.clearCache();
// Need to disable default row bloom filter for this test to pass.
HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index dab8673..2959336 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -96,7 +96,9 @@ public class TestBlockCacheReporting {
@Test
public void testLruBlockCache() throws JsonGenerationException, JsonMappingException, IOException {
- CacheConfig cc = new CacheConfig(this.conf);
+ Configuration dup = new Configuration(this.conf);
+ dup.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
+ CacheConfig cc = new CacheConfig(dup);
assertTrue(cc.isBlockCacheEnabled());
assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
assertTrue(cc.getBlockCache() instanceof LruBlockCache);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index e1ae654..d2684a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -269,7 +269,9 @@ public class TestCacheConfig {
@Test
public void testCacheConfigDefaultLRUBlockCache() {
- CacheConfig cc = new CacheConfig(this.conf);
+ Configuration conf = new Configuration(this.conf);
+ conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
+ CacheConfig cc = new CacheConfig(conf);
assertTrue(cc.isBlockCacheEnabled());
assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
basicBlockCacheOps(cc, false, true);
@@ -376,7 +378,7 @@ public class TestCacheConfig {
c.set(CacheConfig.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096");
c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024);
try {
- CacheConfig.getBucketCache(c);
+ CacheConfig.getBucketCache(c, false);
fail("Should throw IllegalArgumentException when passing illegal value for bucket size");
} catch (IllegalArgumentException e) {
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index c5bc9d7..b2a89b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.MultiByteBuff;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -123,7 +124,6 @@ public class TestHFileBlockIndex {
// This test requires at least HFile format version 2.
conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
-
fs = HFileSystem.get(conf);
}
@@ -644,7 +644,6 @@ public class TestHFileBlockIndex {
writer.close();
}
-
// Read the HFile
HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, true, conf);
assertEquals(expectedNumLevels,
@@ -667,6 +666,7 @@ public class TestHFileBlockIndex {
checkKeyValue("i=" + i, keys[i], values[i],
ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()), scanner.getValue());
}
+ scanner.close();
}
// Manually compute the mid-key and validate it.
@@ -725,9 +725,10 @@ public class TestHFileBlockIndex {
private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr,
ByteBuffer buf) {
- assertEquals(msgPrefix + ": expected " + Bytes.toStringBinary(arr)
- + ", actual " + Bytes.toStringBinary(buf), 0, Bytes.compareTo(arr, 0,
- arr.length, buf.array(), buf.arrayOffset(), buf.limit()));
+ assertEquals(
+ msgPrefix + ": expected " + Bytes.toStringBinary(arr) + ", actual "
+ + Bytes.toStringBinary(buf),
+ 0, ByteBufferUtils.compareTo(arr, 0, arr.length, buf, 0, buf.limit()));
}
/** Check a key/value pair after it was read by the reader */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
index 7e2a0a5..95d1e7b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
@@ -76,11 +76,13 @@ public class TestScannerFromBucketCache {
test_util = HBaseTestingUtility.createLocalHTU();
conf = test_util.getConfiguration();
if (useBucketCache) {
- conf.setInt("hbase.bucketcache.size", 400);
+ conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 400);
conf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
conf.setInt("hbase.bucketcache.writer.threads", 10);
conf.setFloat("hfile.block.cache.size", 0.2f);
conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f);
+ } else {
+ conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 0);
}
tableName = TableName.valueOf(name.getMethodName());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
index b1ae855..26ceade 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
@@ -93,8 +94,9 @@ public class TestScannerSelectionUsingKeyRange {
@Test
public void testScannerSelection() throws IOException {
- Configuration conf = TEST_UTIL.getConfiguration();
+ Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setInt("hbase.hstore.compactionThreshold", 10000);
+ conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
.setBloomFilterType(bloomType);
HTableDescriptor htd = new HTableDescriptor(TABLE);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
index 459deeb..fc5d166 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
@@ -97,8 +98,9 @@ public class TestScannerSelectionUsingTTL {
@Test
public void testScannerSelection() throws IOException {
- Configuration conf = TEST_UTIL.getConfiguration();
+ Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setBoolean("hbase.store.delete.expired.storefile", false);
+ conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
HColumnDescriptor hcd =
new HColumnDescriptor(FAMILY_BYTES)
.setMaxVersions(Integer.MAX_VALUE)