null in case none should be used.
- *
- * @param conf The current configuration.
- * @return The block cache or null.
- */
- public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
- if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
- if (blockCacheDisabled) return null;
-
+ private static long getLruCacheSize(final Configuration conf, final MemoryUsage mu) {
float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
if (cachePercentage <= 0.0001f) {
blockCacheDisabled = true;
- return null;
+ return -1;
}
if (cachePercentage > 1.0) {
throw new IllegalArgumentException(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY +
@@ -473,8 +464,20 @@ public class CacheConfig {
}
      // Calculate the amount of heap to give the block cache.
+ return (long) (mu.getMax() * cachePercentage);
+ }
+
+ /**
+ * Returns the block cache or null in case none should be used.
+ *
+ * @param conf The current configuration.
+ * @return The block cache or null.
+ */
+ public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
+ if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
+ if (blockCacheDisabled) return null;
MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
- long lruCacheSize = (long) (mu.getMax() * cachePercentage);
+ long lruCacheSize = getLruCacheSize(conf, mu);
int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null);
@@ -492,26 +495,23 @@ public class CacheConfig {
int writerQueueLen = conf.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY,
DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
String persistentPath = conf.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
- float combinedPercentage = conf.getFloat(
- BUCKET_CACHE_COMBINED_PERCENTAGE_KEY,
+ float combinedPercentage = conf.getFloat(BUCKET_CACHE_COMBINED_PERCENTAGE_KEY,
DEFAULT_BUCKET_CACHE_COMBINED_PERCENTAGE);
String[] configuredBucketSizes = conf.getStrings(BUCKET_CACHE_BUCKETS_KEY);
- int[] bucketSizes = null;
+ int [] bucketSizes = null;
if (configuredBucketSizes != null) {
bucketSizes = new int[configuredBucketSizes.length];
for (int i = 0; i < configuredBucketSizes.length; i++) {
bucketSizes[i] = Integer.parseInt(configuredBucketSizes[i]);
}
}
+ if (combinedWithLru) {
+ lruCacheSize = (long) ((1 - combinedPercentage) * bucketCacheSize);
+ bucketCacheSize = (long) (combinedPercentage * bucketCacheSize);
+ }
LOG.info("Allocating LruBlockCache size=" +
StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
LruBlockCache lruCache = new LruBlockCache(lruCacheSize, blockSize, true, conf);
- lruCache.setVictimCache(bucketCache);
- if (bucketCache != null && combinedWithLru) {
- GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(lruCache, bucketCache);
- } else {
- GLOBAL_BLOCK_CACHE_INSTANCE = lruCache;
- }
try {
int ioErrorsTolerationDuration = conf.getInt(
"hbase.bucketcache.ioengine.errors.tolerated.duration",
@@ -523,6 +523,12 @@ public class CacheConfig {
LOG.error("Can't instantiate bucket cache", ioex);
throw new RuntimeException(ioex);
}
+ if (combinedWithLru) {
+ GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(lruCache, bucketCache);
+ } else {
+ GLOBAL_BLOCK_CACHE_INSTANCE = lruCache;
+ }
+ lruCache.setVictimCache(bucketCache);
}
LOG.info("Allocating LruBlockCache size=" +
StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index dd2503c..5595439 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -698,7 +698,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
*/
static class EvictionThread extends HasThread {
private WeakReferencehbase.offheapcache.percentage
- * ({@link org.apache.hadoop.hbase.io.hfile.CacheConfig#SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY}) to some
- * value between 0 and 1 in
- * your hbase-site.xml file. This
- * enables {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache}, a facade over
- * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} and
- * {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache}. DoubleBlockCache works as follows.
- * When caching, it
- * "...attempts to cache the block in both caches, while readblock reads first from the faster
- * onheap cache before looking for the block in the off heap cache. Metrics are the
- * combined size and hits and misses of both caches." The value set in
- * hbase.offheapcache.percentage will be
- * multiplied by whatever the setting for -XX:MaxDirectMemorySize is in
- * your hbase-env.sh configuration file and this is what
- * will be used by {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} as its offheap store.
- * Onheap store will be whatever the float
- * {@link org.apache.hadoop.hbase.HConstants#HFILE_BLOCK_CACHE_SIZE_KEY} setting is
- * (some value between 0 and 1) times the size of the allocated java heap.
- *
- * Restart (or rolling restart) your cluster for the configs to take effect. Check logs to - * ensure your configurations came out as expected. + * (roughly because GC is less). See Nick Dimiduk's + * BlockCache 101 for some numbers. * *
hbase.offheapcache.percentage is not set (or set to 0).
- * At this point, it is probably best to read the code to learn the list of bucket cache options
- * and how they combine (to be fixed). Read the options and defaults for BucketCache in the
- * head of the {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}.
+ * Read the options and defaults for BucketCache in the head of the
+ * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}.
*
- * Here is a simple example of how to enable a 4G
- * offheap bucket cache with 1G onheap cache.
- * The onheap/offheap caches
- * are managed by {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache} by default. For the
- * CombinedBlockCache (from the class comment), "The smaller lruCache is used
+ *
Here is a simple example of how to enable a 4G offheap bucket cache with 1G
+ * onheap cache managed by {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}.
+ * CombinedBlockCache will put DATA blocks in the BucketCache and META blocks -- INDEX and BLOOMS
+ * -- in an instance of the LruBlockCache. For the
+ * CombinedBlockCache (from the class comment), "[t]he smaller lruCache is used
* to cache bloom blocks and index blocks, the larger bucketCache is used to
* cache data blocks. getBlock reads first from the smaller lruCache before
* looking for the block in the bucketCache. Metrics are the combined size and
* hits and misses of both caches." To disable CombinedBlockCache and have the BucketCache act
* as a strict L2 cache to the L1 LruBlockCache (i.e. on eviction from L1, blocks go to L2), set
- * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_COMBINED_KEY} to false.
- * Also by default, unless you change it,
- * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_COMBINED_PERCENTAGE_KEY}
- * defaults to 0.9 (see
- * the top of the CacheConfig in the BucketCache defaults section). This means that whatever
- * size you set for the bucket cache with
- * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_SIZE_KEY},
- * 90% will be used for offheap and 10% of the size will be used
- * by the onheap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}.
- *
Back to the example of setting an onheap cache of 1G and ofheap of 4G, in
- * hbase-env.sh ensure the java option -XX:MaxDirectMemorySize is
- * enabled and 5G in size: e.g. -XX:MaxDirectMemorySize=5G. Then in
- * hbase-site.xml add the following configurations:
+ * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_COMBINED_KEY} to false. By
+ * default, hbase.bucketcache.combinedcache.enabled (BUCKET_CACHE_COMBINED_KEY) is true.
+ *
+ *
Back to the example of setting an onheap cache of 1G and offheap of 4G with the BlockCache
+ * deploy managed by CombinedBlockCache. Setting hbase.bucketcache.ioengine and
+ * hbase.bucketcache.size > 0 enables CombinedBlockCache.
+ * In hbase-env.sh ensure the JVM
+ * option -XX:MaxDirectMemorySize is enabled and is bigger than 4G, say 5G in size:
+ * e.g. -XX:MaxDirectMemorySize=5G. This setting allows the JVM to use offheap memory
+ * up to this upper limit. Allocate more than you need because there are other consumers of
+ * offheap memory other than BlockCache (for example DFSClient in the RegionServer uses offheap).
+ * In hbase-site.xml add the following configurations:
<property> <name>hbase.bucketcache.ioengine</name> <value>offheap</value> @@ -114,7 +73,8 @@ <property> <name>hbase.bucketcache.size</name> <value>5120</value> -</property>. Above we set a cache of 5G, 80% of which will be offheap (4G) and 1G onheap. +</property>. Above we set a cache of 5G, 80% of which will be offheap (4G) and 1G onheap + * (with DATA blocks in BucketCache and INDEX blocks in the onheap LruBlockCache). * Restart (or rolling restart) your cluster for the configs to take effect. Check logs to ensure * your configurations came out as expected. * diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index bf7a93b..2c1f52b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -129,7 +129,7 @@ public class TestReplicaWithCluster { HTU.shutdownMiniCluster(); } - @Test + @Test (timeout=30000) public void testCreateDeleteTable() throws IOException { // Create table then get the single region for our new table. 
HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable"); @@ -162,7 +162,7 @@ public class TestReplicaWithCluster { HTU.deleteTable(hdt.getTableName()); } - @Test + @Test (timeout=30000) public void testChangeTable() throws Exception { HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable"); hdt.setRegionReplication(NB_SERVERS); @@ -210,16 +210,20 @@ public class TestReplicaWithCluster { HTU.getHBaseCluster().stopMaster(0); HBaseAdmin admin = new HBaseAdmin(HTU.getConfiguration()); - nHdt =admin.getTableDescriptor(hdt.getTableName()); - Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()), + try { + nHdt = admin.getTableDescriptor(hdt.getTableName()); + Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()), bHdt.getColumnFamilies().length + 1, nHdt.getColumnFamilies().length); - admin.disableTable(hdt.getTableName()); - admin.deleteTable(hdt.getTableName()); - HTU.getHBaseCluster().startMaster(); + admin.disableTable(hdt.getTableName()); + admin.deleteTable(hdt.getTableName()); + HTU.getHBaseCluster().startMaster(); + } finally { + if (admin != null) admin.close(); + } } - @Test + @Test (timeout=30000) public void testReplicaAndReplication() throws Exception { HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaAndReplication"); hdt.setRegionReplication(NB_SERVERS); @@ -242,19 +246,23 @@ public class TestReplicaWithCluster { HTU2.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration()); - admin.addPeer("2", HTU2.getClusterKey()); + try { + admin.addPeer("2", HTU2.getClusterKey()); + } finally { + if (admin != null) admin.close(); + } Put p = new Put(row); p.add(row, row, row); final HTable table = new HTable(HTU.getConfiguration(), hdt.getTableName()); - table.put(p); - - HTU.getHBaseAdmin().flush(table.getTableName()); - LOG.info("Put & flush done on the first cluster. 
Now doing a get on the same cluster."); + try { + table.put(p); + HTU.getHBaseAdmin().flush(table.getTableName()); + LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster."); - Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate