From d6e56736e62acca269887aea46757f0f14b36ece Mon Sep 17 00:00:00 2001 From: "terence.yoo" Date: Tue, 27 Oct 2015 17:05:58 +0900 Subject: [PATCH] change a single instance of block cache to a map of region server and block cache --- .../apache/hadoop/hbase/io/hfile/CacheConfig.java | 91 ++++++++-- .../apache/hadoop/hbase/mob/MobCacheConfig.java | 24 ++- .../hadoop/hbase/regionserver/HRegionServer.java | 8 + .../hbase/io/hfile/TestBlockCacheReporting.java | 4 +- .../hadoop/hbase/io/hfile/TestCacheConfig.java | 4 +- .../io/hfile/TestCacheConfigMultipleInstances.java | 187 +++++++++++++++++++++ .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 2 +- .../io/hfile/TestForceCacheImportantBlocks.java | 2 +- .../io/hfile/TestLazyDataBlockDecompression.java | 12 +- .../mob/TestMobCacheConfigMultipleInstances.java | 187 +++++++++++++++++++++ 10 files changed, 496 insertions(+), 25 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfigMultipleInstances.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCacheConfigMultipleInstances.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index d6bdec0..c3cb93b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -23,6 +23,8 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.MemoryUsage; +import java.util.HashMap; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -135,6 +137,16 @@ public class CacheConfig { private static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true; /** + * Configuration key for multiple cache 
instances + */ + public static final String REGION_SERVER_HASH_KEY = "hbase.blockcache.regionserver.hash"; + + /** + * Configuration default value for multiple cache instances + */ + public static final String REGION_SERVER_HASH_DEFAULT = "NONE"; + + /** * Enum of all built in external block caches. * This is used for config. */ @@ -509,12 +521,13 @@ public class CacheConfig { // Static block cache reference and methods /** - * Static reference to the block cache, or null if no caching should be used - * at all. + * Static reference to the block cache, or null if no caching should be used at all. + * Clear this if in tests you'd make more than one block cache instance. + * + * When hbase is running in standalone mode or HBaseTestingUtility, + * the block cache instances should be mapped to each region servers. */ - // Clear this if in tests you'd make more than one block cache instance. - @VisibleForTesting - static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE; + private static Map<String, BlockCache> GLOBAL_BLOCK_CACHE_INSTANCE = new HashMap<>(); /** Boolean whether we have disabled the block cache entirely. */ @VisibleForTesting @@ -651,7 +664,10 @@ public class CacheConfig { * @return The block cache or null.
*/ public static synchronized BlockCache instantiateBlockCache(Configuration conf) { - if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE; + String regionServerHash = getRegionServerHash(conf); + BlockCache blockCache = GLOBAL_BLOCK_CACHE_INSTANCE.get(regionServerHash); + if (blockCache != null) return blockCache; + if (blockCacheDisabled) return null; MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); LruBlockCache l1 = getL1(conf, mu); @@ -659,26 +675,79 @@ public class CacheConfig { if (blockCacheDisabled) return null; BlockCache l2 = getL2(conf, mu); if (l2 == null) { - GLOBAL_BLOCK_CACHE_INSTANCE = l1; + blockCache = l1; } else { boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY, DEFAULT_BUCKET_CACHE_COMBINED); if (useExternal) { - GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2); + blockCache = new InclusiveCombinedBlockCache(l1, l2); } else { if (combinedWithLru) { - GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2); + blockCache = new CombinedBlockCache(l1, l2); } else { // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler // mechanism. It is a little ugly but works according to the following: when the // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get // a block from the L1 cache, if not in L1, we will search L2. - GLOBAL_BLOCK_CACHE_INSTANCE = l1; + blockCache = l1; } } l1.setVictimCache(l2); } - return GLOBAL_BLOCK_CACHE_INSTANCE; + GLOBAL_BLOCK_CACHE_INSTANCE.put(regionServerHash, blockCache); + return blockCache; + } + + /** + * Returns the region server hash as String from Configuration. 
+ * + * @param conf the current configuration + * @return the hash of region server + */ + protected static String getRegionServerHash(Configuration conf) { + return conf.get(REGION_SERVER_HASH_KEY, REGION_SERVER_HASH_DEFAULT); + } + + /** + * Sets region server hash to the configuration. This is used in HRegionServer initializing. + * + * @param conf the current configuration + * @param hash the hash of region server + */ + public static void setRegionServerHash(Configuration conf, int hash) { + conf.set(CacheConfig.REGION_SERVER_HASH_KEY, String.valueOf(hash)); + } + + /** + * Clears block cache instance of the current region server. + * The hash of region server is extracted from conf. + * + * @param conf the current configuration + */ + @VisibleForTesting + static void clearBlockCache(Configuration conf) { + GLOBAL_BLOCK_CACHE_INSTANCE.remove(getRegionServerHash(conf)); + } + + /** + * Sets block cache instance of the current region server. + * + * @param conf the current configuration + * @param blockCache the block cache instance + */ + @VisibleForTesting + static void setBlockCache(Configuration conf, BlockCache blockCache) { + GLOBAL_BLOCK_CACHE_INSTANCE.put(getRegionServerHash(conf), blockCache); + } + + /** + * Returns the number of block cache instances.
+ * + * @return the number of block cache instances + */ + @VisibleForTesting + static int getBlockCacheInstanceCount() { + return GLOBAL_BLOCK_CACHE_INSTANCE.size(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java index 6c80355..cf6e25b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java @@ -18,27 +18,34 @@ */ package org.apache.hadoop.hbase.mob; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import java.util.HashMap; +import java.util.Map; + /** * The cache configuration for the mob. */ @InterfaceAudience.Private public class MobCacheConfig extends CacheConfig { - private static MobFileCache mobFileCache; + private static Map<String, MobFileCache> mobFileCacheMap = new HashMap<>(); + private final String regionServerHash; public MobCacheConfig(Configuration conf, HColumnDescriptor family) { super(conf, family); instantiateMobFileCache(conf); + this.regionServerHash = getRegionServerHash(conf); } public MobCacheConfig(Configuration conf) { super(conf); instantiateMobFileCache(conf); + this.regionServerHash = getRegionServerHash(conf); } /** @@ -47,9 +54,12 @@ public class MobCacheConfig { * @return The current instance of MobFileCache.
*/ public static synchronized MobFileCache instantiateMobFileCache(Configuration conf) { + String regionServerHash = getRegionServerHash(conf); + MobFileCache mobFileCache = mobFileCacheMap.get(regionServerHash); if (mobFileCache == null) { mobFileCache = new MobFileCache(conf); } + mobFileCacheMap.put(regionServerHash, mobFileCache); return mobFileCache; } @@ -58,6 +68,16 @@ public class MobCacheConfig extends CacheConfig { * @return The MobFileCache. */ public MobFileCache getMobFileCache() { - return mobFileCache; + return mobFileCacheMap.get(regionServerHash); + } + + /** + * Returns the number of cache instances. + * + * @return the number of cache instances + */ + @VisibleForTesting + static int getMobFileCacheCount() { + return mobFileCacheMap.size(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8a5e423..ccd302d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -557,6 +557,7 @@ public class HRegionServer extends HasThread implements login(userProvider, hostName); regionServerAccounting = new RegionServerAccounting(); + CacheConfig.setRegionServerHash(conf, hashCode()); cacheConfig = new CacheConfig(conf); mobCacheConfig = new MobCacheConfig(conf); uncaughtExceptionHandler = new UncaughtExceptionHandler() { @@ -3297,6 +3298,13 @@ public class HRegionServer extends HasThread implements } /** + * @return The mob cache config instance used by the regionserver. + */ + public MobCacheConfig getMobCacheConfig() { + return this.mobCacheConfig; + } + + /** * @return : Returns the ConfigurationManager object for testing purposes. 
*/ protected ConfigurationManager getConfigurationManager() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java index 4080249..feeb25c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java @@ -46,14 +46,14 @@ public class TestBlockCacheReporting { @Before public void setUp() throws Exception { - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; this.conf = HBaseConfiguration.create(); + CacheConfig.clearBlockCache(conf); } @After public void tearDown() throws Exception { // Let go of current block cache. - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; + CacheConfig.clearBlockCache(conf); } private void addDataAndHits(final BlockCache bc, final int count) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index ad08794..a911c46 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -156,14 +156,14 @@ public class TestCacheConfig { @Before public void setUp() throws Exception { - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; this.conf = HBaseConfiguration.create(); + CacheConfig.clearBlockCache(conf); } @After public void tearDown() throws Exception { // Let go of current block cache. 
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; + CacheConfig.clearBlockCache(conf); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfigMultipleInstances.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfigMultipleInstances.java new file mode 100644 index 0000000..b688fb2 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfigMultipleInstances.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; + +/** + * Tests that multiple block cache instance within a single process do as expected. + */ +@Category({IOTests.class, MediumTests.class}) +public class TestCacheConfigMultipleInstances { + private static final Log LOG = LogFactory.getLog(TestCacheConfigMultipleInstances.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final int NUM_MASTERS = 1; + private static final int NUM_SLAVES = 2; + private static final String CF = "d"; + private static Connection connection; + private static Admin admin; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_SLAVES); + connection = TEST_UTIL.getConnection(); + admin = connection.getAdmin(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + if (admin != null) admin.close(); + if (connection != null) connection.close(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMultipleBlockCacheInstances() throws Exception { + assertEquals(NUM_MASTERS + NUM_SLAVES, CacheConfig.getBlockCacheInstanceCount()); + + HRegionServer[] regionServers = new HRegionServer[NUM_SLAVES]; + for (int i = 0; i < NUM_SLAVES; i++) + regionServers[i] = TEST_UTIL.getHBaseCluster().getRegionServer(i); + + TableName tableName = TableName.valueOf("test"); + createTable(tableName, 
CF); + + int servingRegionServer = findServingRegionServer(tableName, regionServers); + + generateTestData(tableName); + + long[] hitCountsPrev = new long[NUM_SLAVES]; + long[] missCountsPrev = new long[NUM_SLAVES]; + long[] hitCountsCur = new long[NUM_SLAVES]; + long[] missCountsCur = new long[NUM_SLAVES]; + + // initial stats + gatherStats(regionServers, hitCountsPrev, missCountsPrev, hitCountsCur, missCountsCur); + + // first scan. cache miss is expected + scanTestData(tableName); + gatherStats(regionServers, hitCountsPrev, missCountsPrev, hitCountsCur, missCountsCur); + for (int i = 0; i < NUM_SLAVES; i++) { + LOG.info("hitCountsPrev[" + i + "]=" + hitCountsPrev[i] + + ", missCountsPrev[" + i + "]=" + missCountsPrev[i] + + ", hitCountsCur[" + i + "]=" + hitCountsCur[i] + + ", missCountsCur[" + i + "]=" + missCountsCur[i]); + if (i == servingRegionServer) { + assertEquals(0, hitCountsCur[i] - hitCountsPrev[i]); + assertEquals(1, missCountsCur[i] - missCountsPrev[i]); + } else { + assertEquals(0, hitCountsCur[i] - hitCountsPrev[i]); + assertEquals(0, missCountsCur[i] - missCountsPrev[i]); + } + } + + // second scan. 
cache hit is expected + scanTestData(tableName); + gatherStats(regionServers, hitCountsPrev, missCountsPrev, hitCountsCur, missCountsCur); + for (int i = 0; i < NUM_SLAVES; i++) { + LOG.info("hitCountsPrev[" + i + "]=" + hitCountsPrev[i] + + ", missCountsPrev[" + i + "]=" + missCountsPrev[i] + + ", hitCountsCur[" + i + "]=" + hitCountsCur[i] + + ", missCountsCur[" + i + "]=" + missCountsCur[i]); + if (i == servingRegionServer) { + assertEquals(1, hitCountsCur[i] - hitCountsPrev[i]); + assertEquals(0, missCountsCur[i] - missCountsPrev[i]); + } else { + assertEquals(0, hitCountsCur[i] - hitCountsPrev[i]); + assertEquals(0, missCountsCur[i] - missCountsPrev[i]); + } + } + } + + private void scanTestData(TableName tableName) throws IOException { + try (Table table = connection.getTable(tableName)) { + Scan scan = new Scan(); + try (ResultScanner scanner = table.getScanner(scan)) { + scanner.next(); + } + } + } + + private void gatherStats(HRegionServer[] regionServers, long[] hitCountsPrev, + long[] missCountsPrev, long[] hitCountsCur, long[] missCountsCur) { + for (int i = 0; i < NUM_SLAVES; i++) { + hitCountsPrev[i] = hitCountsCur[i]; + missCountsPrev[i] = missCountsCur[i]; + hitCountsCur[i] = getHitCount(regionServers, i); + missCountsCur[i] = getMissCount(regionServers, i); + } + } + + private long getHitCount(HRegionServer[] regionServers, int servingRegionServer) { + return regionServers[servingRegionServer].getCacheConfig() + .getBlockCache().getStats().getHitCount(); + } + + private long getMissCount(HRegionServer[] regionServers, int servingRegionServer) { + return regionServers[servingRegionServer].getCacheConfig(). 
+ getBlockCache().getStats().getMissCount(); + } + + private void generateTestData(TableName tableName) throws IOException { + try (Table table = connection.getTable(tableName)) { + Put put = new Put("a".getBytes()); + put.addColumn(CF.getBytes(), "c1".getBytes(), "a".getBytes()); + table.put(put); + + // flush memstore to use block cache + admin.flush(tableName); + } + } + + private int findServingRegionServer(TableName tableName, HRegionServer[] regionServers) + throws IOException { + int servingRegionServerIndex = -1; + for (HRegionInfo hRegionInfo : admin.getOnlineRegions(regionServers[0].getServerName())) { + if (hRegionInfo.getTable().equals(tableName)) { + servingRegionServerIndex = 0; + break; + } + } + if (servingRegionServerIndex < 0) { + for (HRegionInfo hRegionInfo : admin.getOnlineRegions(regionServers[1].getServerName())) { + if (hRegionInfo.getTable().equals(tableName)) { + servingRegionServerIndex = 1; + break; + } + } + } + return servingRegionServerIndex; + } + + private void createTable(TableName tableName, String CF) throws IOException { + HTableDescriptor td = new HTableDescriptor(tableName); + HColumnDescriptor cd = new HColumnDescriptor(CF); + td.addFamily(cd); + admin.createTable(td); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 2c957ef..bb73424 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -221,7 +221,7 @@ public class TestCacheOnWrite { conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData); cowType.modifyConf(conf); fs = HFileSystem.get(conf); - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache; + CacheConfig.setBlockCache(conf, blockCache); cacheConf = new CacheConfig(blockCache, true, true, 
cowType.shouldBeCached(BlockType.DATA), cowType.shouldBeCached(BlockType.LEAF_INDEX), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index cf2aca5..8323962 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -96,7 +96,7 @@ public class TestForceCacheImportantBlocks { @Before public void setup() { // Make sure we make a new one each time. - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; + CacheConfig.clearBlockCache(TEST_UTIL.getConfiguration()); HFile.dataBlockReadCnt.set(0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java index 0067417..1f74b71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java @@ -73,13 +73,13 @@ public class TestLazyDataBlockDecompression { @Before public void setUp() throws IOException { - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; + CacheConfig.clearBlockCache(TEST_UTIL.getConfiguration()); fs = FileSystem.get(TEST_UTIL.getConfiguration()); } @After public void tearDown() { - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; + CacheConfig.clearBlockCache(TEST_UTIL.getConfiguration()); fs = null; } @@ -150,8 +150,8 @@ public class TestLazyDataBlockDecompression { lazyCompressDisabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, cacheOnWrite); lazyCompressDisabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, cacheOnWrite); 
lazyCompressDisabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, false); - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = - new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressDisabled); + CacheConfig.setBlockCache(lazyCompressDisabled, + new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressDisabled)); CacheConfig cc = new CacheConfig(lazyCompressDisabled); assertFalse(cc.shouldCacheDataCompressed()); assertTrue(cc.getBlockCache() instanceof LruBlockCache); @@ -185,8 +185,8 @@ public class TestLazyDataBlockDecompression { lazyCompressEnabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, cacheOnWrite); lazyCompressEnabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, cacheOnWrite); lazyCompressEnabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true); - CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = - new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressEnabled); + CacheConfig.setBlockCache(lazyCompressEnabled, + new LruBlockCache(maxSize, HConstants.DEFAULT_BLOCKSIZE, false, lazyCompressEnabled)); cc = new CacheConfig(lazyCompressEnabled); assertTrue("test improperly configured.", cc.shouldCacheDataCompressed()); assertTrue(cc.getBlockCache() instanceof LruBlockCache); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCacheConfigMultipleInstances.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCacheConfigMultipleInstances.java new file mode 100644 index 0000000..a9dc7ca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCacheConfigMultipleInstances.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.mob; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; + +/** + * Tests that multiple mob file cache instance within a single process do as expected. 
+ */ +@Category(MediumTests.class) +public class TestMobCacheConfigMultipleInstances { + private static final Log LOG = LogFactory.getLog(TestMobCacheConfigMultipleInstances.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final int NUM_MASTERS = 1; + private static final int NUM_SLAVES = 2; + private static final String CF = "d"; + private static Connection connection; + private static Admin admin; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_SLAVES); + connection = TEST_UTIL.getConnection(); + admin = connection.getAdmin(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + if (admin != null) admin.close(); + if (connection != null) connection.close(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMultipleBlockCacheInstances() throws Exception { + Assert.assertEquals(NUM_MASTERS + NUM_SLAVES, MobCacheConfig.getMobFileCacheCount()); + + HRegionServer[] regionServers = new HRegionServer[NUM_SLAVES]; + for (int i = 0; i < NUM_SLAVES; i++) + regionServers[i] = TEST_UTIL.getHBaseCluster().getRegionServer(i); + + TableName tableName = TableName.valueOf("test"); + createTable(tableName, CF); + + int servingRegionServer = findServingRegionServer(tableName, regionServers); + + generateTestData(tableName); + + long[] accessCountsPrev = new long[NUM_SLAVES]; + long[] missCountsPrev = new long[NUM_SLAVES]; + long[] accessCountsCur = new long[NUM_SLAVES]; + long[] missCountsCur = new long[NUM_SLAVES]; + + // initial stats + gatherStats(regionServers, accessCountsPrev, missCountsPrev, accessCountsCur, missCountsCur); + + // first scan. 
cache miss is expected + scanTestData(tableName); + gatherStats(regionServers, accessCountsPrev, missCountsPrev, accessCountsCur, missCountsCur); + for (int i = 0; i < NUM_SLAVES; i++) { + LOG.info("accessCountsPrev[" + i + "]=" + accessCountsPrev[i] + + ", missCountsPrev[" + i + "]=" + missCountsPrev[i] + + ", accessCountsCur[" + i + "]=" + accessCountsCur[i] + + ", missCountsCur[" + i + "]=" + missCountsCur[i]); + if (i == servingRegionServer) { + Assert.assertEquals(1, accessCountsCur[i] - accessCountsPrev[i]); + Assert.assertEquals(1, missCountsCur[i] - missCountsPrev[i]); + } else { + Assert.assertEquals(0, accessCountsCur[i] - accessCountsPrev[i]); + Assert.assertEquals(0, missCountsCur[i] - missCountsPrev[i]); + } + } + + // second scan. cache miss is not expected + scanTestData(tableName); + gatherStats(regionServers, accessCountsPrev, missCountsPrev, accessCountsCur, missCountsCur); + for (int i = 0; i < NUM_SLAVES; i++) { + LOG.info("accessCountsPrev[" + i + "]=" + accessCountsPrev[i] + + ", missCountsPrev[" + i + "]=" + missCountsPrev[i] + + ", accessCountsCur[" + i + "]=" + accessCountsCur[i] + + ", missCountsCur[" + i + "]=" + missCountsCur[i]); + if (i == servingRegionServer) { + Assert.assertEquals(1, accessCountsCur[i] - accessCountsPrev[i]); + Assert.assertEquals(0, missCountsCur[i] - missCountsPrev[i]); + } else { + Assert.assertEquals(0, accessCountsCur[i] - accessCountsPrev[i]); + Assert.assertEquals(0, missCountsCur[i] - missCountsPrev[i]); + } + } + } + + private void scanTestData(TableName tableName) throws IOException { + try (Table table = connection.getTable(tableName)) { + Scan scan = new Scan(); + try (ResultScanner scanner = table.getScanner(scan)) { + scanner.next(); + } + } + } + + private void gatherStats(HRegionServer[] regionServers, long[] accessCountsPrev, + long[] missCountsPrev, long[] accessCountsCur, long[] missCountsCur) { + for (int i = 0; i < NUM_SLAVES; i++) { + accessCountsPrev[i] = accessCountsCur[i]; + 
missCountsPrev[i] = missCountsCur[i]; + accessCountsCur[i] = getAccessCount(regionServers, i); + missCountsCur[i] = getMissCount(regionServers, i); + } + } + + private long getAccessCount(HRegionServer[] regionServers, int servingRegionServer) { + return regionServers[servingRegionServer].getMobCacheConfig() + .getMobFileCache().getAccessCount(); + } + + private long getMissCount(HRegionServer[] regionServers, int servingRegionServer) { + return regionServers[servingRegionServer].getMobCacheConfig() + .getMobFileCache().getMissCount(); + } + + private void generateTestData(TableName tableName) throws IOException { + try (Table table = connection.getTable(tableName)) { + Put put = new Put("a".getBytes()); + put.addColumn(CF.getBytes(), "c1".getBytes(), "a".getBytes()); + table.put(put); + + // flush memstore to use block cache + admin.flush(tableName); + } + } + + private int findServingRegionServer(TableName tableName, HRegionServer[] regionServers) + throws IOException { + int servingRegionServerIndex = -1; + for (HRegionInfo hRegionInfo : admin.getOnlineRegions(regionServers[0].getServerName())) { + if (hRegionInfo.getTable().equals(tableName)) { + servingRegionServerIndex = 0; + break; + } + } + if (servingRegionServerIndex < 0) { + for (HRegionInfo hRegionInfo : admin.getOnlineRegions(regionServers[1].getServerName())) { + if (hRegionInfo.getTable().equals(tableName)) { + servingRegionServerIndex = 1; + break; + } + } + } + return servingRegionServerIndex; + } + + private void createTable(TableName tableName, String CF) throws IOException { + HTableDescriptor td = new HTableDescriptor(tableName); + HColumnDescriptor cd = new HColumnDescriptor(CF); + cd.setMobEnabled(true); + cd.setMobThreshold(0); + td.addFamily(cd); + admin.createTable(td); + } +} -- 2.4.9 (Apple Git-60)