From 475b312d104b0f0692dc9d9635f50a62ab5848f9 Mon Sep 17 00:00:00 2001
From: zhangduo
Date: Fri, 20 Feb 2015 08:35:35 +0800
Subject: [PATCH] HBASE-13070 Fix TestCacheOnWrite

---
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java    | 130 +++++++++++----------
 1 file changed, 68 insertions(+), 62 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index b13c076..c63367a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.io.hfile;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -29,7 +29,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumMap;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -37,7 +36,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -59,6 +57,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -66,6 +65,8 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
+import com.google.common.collect.Lists;
+
 /**
  * Tests {@link HFile} cache-on-write functionality for the following block
  * types: data blocks, non-root index blocks, and Bloom filter blocks.
@@ -170,7 +171,7 @@ public class TestCacheOnWrite {
     this.blockCache = blockCache;
     testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress +
         ", encoderType=" + encoderType + ", cacheCompressedData=" + cacheCompressedData + "]";
-    System.out.println(testDescription);
+    LOG.info(testDescription);
   }
 
   private static List<BlockCache> getBlockCaches() throws IOException {
@@ -185,10 +186,10 @@ public class TestCacheOnWrite {
 
     // bucket cache
     FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
-    int[] bucketSizes = {INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024 };
+    int[] bucketSizes =
+        { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 };
     BlockCache bucketcache =
-        new BucketCache("file:" + TEST_UTIL.getDataTestDir() + "/bucket.data",
-            128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
+        new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
     blockcaches.add(bucketcache);
     return blockcaches;
   }
@@ -210,6 +211,22 @@ public class TestCacheOnWrite {
     return cowTypes;
   }
 
+  private void clearBlockCache(BlockCache blockCache) {
+    if (blockCache instanceof LruBlockCache) {
+      ((LruBlockCache) blockCache).clearCache();
+    } else {
+      // BucketCache may not return all cached blocks (blocks still in its write queue),
+      // so keep checking the count here.
+      while (blockCache.getBlockCount() > 0) {
+        for (CachedBlock block : Lists.newArrayList(blockCache)) {
+          BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
+          // CombinedBlockCache may need to evict twice.
+          while (blockCache.evictBlock(key)) {
+          }
+        }
+      }
+    }
+  }
+
   @Before
   public void setUp() throws IOException {
     conf = TEST_UTIL.getConfiguration();
@@ -221,6 +238,7 @@ public class TestCacheOnWrite {
     conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
     cowType.modifyConf(conf);
     fs = HFileSystem.get(conf);
+    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
     cacheConf =
         new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
             cowType.shouldBeCached(BlockType.LEAF_INDEX),
@@ -228,18 +246,16 @@ public class TestCacheOnWrite {
   }
 
   @After
-  public void tearDown() {
-    cacheConf = new CacheConfig(conf);
-    blockCache = cacheConf.getBlockCache();
+  public void tearDown() throws IOException {
+    clearBlockCache(blockCache);
   }
 
-  @Test
-  public void testStoreFileCacheOnWrite() throws IOException {
-    testStoreFileCacheOnWriteInternals(false);
-    testStoreFileCacheOnWriteInternals(true);
+  @AfterClass
+  public static void afterClass() throws IOException {
+    TEST_UTIL.cleanupTestDir();
   }
 
-  protected void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOException {
+  private void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOException {
     writeStoreFile(useTags);
     readStoreFile(useTags);
   }
@@ -323,15 +339,15 @@ public class TestCacheOnWrite {
         encoderType.encode ? BlockType.ENCODED_DATA : BlockType.DATA;
     if (useTags) {
       assertEquals("{" + cachedDataBlockType
-          + "=1550, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=20}", countByType);
+          + "=2663, LEAF_INDEX=297, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=34}", countByType);
     } else {
       assertEquals("{" + cachedDataBlockType
-          + "=1379, LEAF_INDEX=154, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=18}", countByType);
+          + "=2498, LEAF_INDEX=278, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=31}", countByType);
     }
 
     // iterate all the keyvalue from hfile
     while (scanner.next()) {
-      Cell cell = scanner.getKeyValue();
+      scanner.getKeyValue();
     }
     reader.close();
   }
@@ -341,18 +357,16 @@ public class TestCacheOnWrite {
       // Let's make half of KVs puts.
       return KeyValue.Type.Put;
     } else {
-      KeyValue.Type keyType =
-          KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
-      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum)
-      {
-        throw new RuntimeException("Generated an invalid key type: " + keyType
-            + ". " + "Probably the layout of KeyValue.Type has changed.");
+      KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
+      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
+        throw new RuntimeException("Generated an invalid key type: " + keyType + ". "
+            + "Probably the layout of KeyValue.Type has changed.");
       }
       return keyType;
     }
   }
 
-  public void writeStoreFile(boolean useTags) throws IOException {
+  private void writeStoreFile(boolean useTags) throws IOException {
     if(useTags) {
       TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
     } else {
@@ -368,12 +382,11 @@ public class TestCacheOnWrite {
         .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR)
         .withFileContext(meta)
         .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build();
-
-    final int rowLen = 32;
+    byte[] cf = Bytes.toBytes("fam");
     for (int i = 0; i < NUM_KV; ++i) {
-      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
-      byte[] v = TestHFileWriterV2.randomValue(rand);
-      int cfLen = rand.nextInt(k.length - rowLen + 1);
+      byte[] row = TestHFileWriterV2.randomOrderedKey(rand, i);
+      byte[] qualifier = TestHFileWriterV2.randomRowOrQualifier(rand);
+      byte[] value = TestHFileWriterV2.randomValue(rand);
       KeyValue kv;
       if(useTags) {
         Tag t = new Tag((byte) 1, "visibility");
         List<Tag> tagList = new ArrayList<Tag>();
         tagList.add(t);
         Tag[] tags = new Tag[1];
         tags[0] = t;
-        kv = new KeyValue(
-            k, 0, rowLen,
-            k, rowLen, cfLen,
-            k, rowLen + cfLen, k.length - rowLen - cfLen,
-            rand.nextLong(),
-            generateKeyType(rand),
-            v, 0, v.length, tagList);
+        kv =
+            new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length,
+                rand.nextLong(), generateKeyType(rand), value, 0, value.length, tagList);
       } else {
-        kv = new KeyValue(
-            k, 0, rowLen,
-            k, rowLen, cfLen,
-            k, rowLen + cfLen, k.length - rowLen - cfLen,
-            rand.nextLong(),
-            generateKeyType(rand),
-            v, 0, v.length);
+        kv =
+            new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length,
+                rand.nextLong(), generateKeyType(rand), value, 0, value.length);
       }
       sfw.append(kv);
     }
@@ -404,13 +409,7 @@ public class TestCacheOnWrite {
     storeFilePath = sfw.getPath();
   }
 
-  @Test
-  public void testNotCachingDataBlocksDuringCompaction() throws IOException {
-    testNotCachingDataBlocksDuringCompactionInternals(false);
-    testNotCachingDataBlocksDuringCompactionInternals(true);
-  }
-
-  protected void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) throws IOException {
+  private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) throws IOException {
     if (useTags) {
       TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
     } else {
@@ -459,20 +458,27 @@ public class TestCacheOnWrite {
       }
       region.flushcache();
     }
-    LruBlockCache blockCache =
-        (LruBlockCache) new CacheConfig(conf).getBlockCache();
-    blockCache.clearCache();
-    assertEquals(0, blockCache.getBlockTypeCountsForTest().size());
+    clearBlockCache(blockCache);
+    assertEquals(0, blockCache.getBlockCount());
 
     region.compactStores();
     LOG.debug("compactStores() returned");
 
-    Map<BlockType, Integer> blockTypesInCache =
-        blockCache.getBlockTypeCountsForTest();
-    LOG.debug("Block types in cache: " + blockTypesInCache);
-    assertNull(blockTypesInCache.get(BlockType.ENCODED_DATA));
-    assertNull(blockTypesInCache.get(BlockType.DATA));
+    for (CachedBlock block : blockCache) {
+      assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
+      assertNotEquals(BlockType.DATA, block.getBlockType());
+    }
     region.close();
-    blockCache.shutdown();
   }
-}
 
+  @Test
+  public void testStoreFileCacheOnWrite() throws IOException {
+    testStoreFileCacheOnWriteInternals(false);
+    testStoreFileCacheOnWriteInternals(true);
+  }
+
+  @Test
+  public void testNotCachingDataBlocksDuringCompaction() throws IOException {
+    testNotCachingDataBlocksDuringCompactionInternals(false);
+    testNotCachingDataBlocksDuringCompactionInternals(true);
+  }
+}
-- 
1.9.1
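
The heart of the fix is the eviction-drain loop in the clearBlockCache() helper above. Below is a minimal standalone sketch of that pattern against a simplified, hypothetical cache interface: SimpleCache, its Entry type, and EvictionDrainSketch are illustrative stand-ins invented here, not HBase's BlockCache/CachedBlock/BlockCacheKey API.

// EvictionDrainSketch.java -- a minimal sketch of the drain pattern used by
// clearBlockCache() in the patch, assuming a simplified cache interface.
import java.util.ArrayList;
import java.util.List;

interface SimpleCache extends Iterable<SimpleCache.Entry> {
  interface Entry {
    String filename();
    long offset();
  }

  long blockCount();

  // Returns true if an entry was evicted for this key; a two-tier cache may
  // hold the same key in both tiers, so a single call may not remove everything.
  boolean evict(String filename, long offset);
}

final class EvictionDrainSketch {
  // Keep sweeping until the cache reports empty: blocks sitting in an
  // asynchronous write queue (as with BucketCache) can be invisible to a
  // single iteration over the cache's contents.
  static void drain(SimpleCache cache) {
    while (cache.blockCount() > 0) {
      // Snapshot the entries first; evicting while iterating the live view
      // risks skipping entries or a ConcurrentModificationException.
      List<SimpleCache.Entry> snapshot = new ArrayList<>();
      for (SimpleCache.Entry e : cache) {
        snapshot.add(e);
      }
      for (SimpleCache.Entry e : snapshot) {
        // Evict repeatedly, mirroring the patch's inner while loop: a
        // combined cache may need more than one eviction per key.
        while (cache.evict(e.filename(), e.offset())) {
        }
      }
    }
  }
}

The two nested retry loops carry the reasoning of the patch comments: the outer while guards against blocks that only surface after the write queue flushes, and the inner while covers caches that report one successful eviction per tier.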