From 28c7d41333d5f1916e96501f598928d35ea1fa55 Mon Sep 17 00:00:00 2001
From: chenheng
Date: Tue, 17 May 2016 16:21:31 +0800
Subject: [PATCH] HBASE-15316 Add the ability to cache ROOT_INDEX on reads only

---
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  |  63 ++++-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |   4 +
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java     |   7 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     |   8 +-
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java    |   2 +-
 .../hbase/io/hfile/TestCacheRootIndexOnly.java     | 278 +++++++++++++++++++++
 6 files changed, 352 insertions(+), 10 deletions(-)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheRootIndexOnly.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index ffb9424..f6dac03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -65,6 +65,18 @@ public class CacheConfig {
       "hfile.block.index.cacheonwrite";
 
   /**
+   * Configuration key to only cache root index blocks on write.
+   */
+  public static final String CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_WRITE_KEY =
+      "hfile.block.rootindex.only.cacheonwrite";
+
+  /**
+   * Configuration key to only cache root index blocks on read.
+   */
+  public static final String CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_READ_KEY =
+      "hfile.block.rootindex.only.cacheonread";
+
+  /**
    * Configuration key to cache compound bloom filter blocks on write.
    */
   public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY =
@@ -176,6 +188,8 @@ public class CacheConfig {
   public static final boolean DEFAULT_EVICT_ON_CLOSE = false;
   public static final boolean DEFAULT_CACHE_DATA_COMPRESSED = false;
   public static final boolean DEFAULT_PREFETCH_ON_OPEN = false;
+  public static final boolean DEFAULT_CACHE_ROOT_INDEX_ONLY_ON_READ = false;
+  public static final boolean DEFAULT_CACHE_ROOT_INDEX_ONLY_ON_WRITE = false;
 
   /** Local reference to the block cache, null if completely disabled */
   private final BlockCache blockCache;
@@ -218,6 +232,12 @@ public class CacheConfig {
 
   private final boolean dropBehindCompaction;
 
+  /** Whether to cache only the root index block on write. */
+  private boolean cacheRootIndexOnlyOnWrite;
+
+  /** Whether to cache only the root index block on read. */
+  private boolean cacheRootIndexOnlyOnRead;
+
   /**
    * Create a cache configuration using the specified configuration object and
    * family descriptor.
@@ -244,7 +264,9 @@ public class CacheConfig {
           DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
         conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
           HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
-        conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
+        conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT),
+        conf.getBoolean(CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_READ_KEY, DEFAULT_CACHE_ROOT_INDEX_ONLY_ON_READ),
+        conf.getBoolean(CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_WRITE_KEY, DEFAULT_CACHE_ROOT_INDEX_ONLY_ON_WRITE)
      );
     LOG.info("Created cacheConfig for " + family.getNameAsString() + ": " + this);
   }
@@ -269,7 +291,9 @@ public class CacheConfig {
       conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN),
       conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
         HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1),
-      conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
+      conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT),
+      conf.getBoolean(CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_READ_KEY, DEFAULT_CACHE_ROOT_INDEX_ONLY_ON_READ),
+      conf.getBoolean(CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_WRITE_KEY, DEFAULT_CACHE_ROOT_INDEX_ONLY_ON_WRITE)
      );
     LOG.info("Created cacheConfig: " + this);
   }
@@ -295,7 +319,8 @@ public class CacheConfig {
       final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
       final boolean cacheBloomsOnWrite, final boolean evictOnClose,
       final boolean cacheDataCompressed, final boolean prefetchOnOpen,
-      final boolean cacheDataInL1, final boolean dropBehindCompaction) {
+      final boolean cacheDataInL1, final boolean dropBehindCompaction,
+      final boolean cacheRootIndexOnRead, final boolean cacheRootIndexOnWrite) {
     this.blockCache = blockCache;
     this.cacheDataOnRead = cacheDataOnRead;
     this.inMemory = inMemory;
@@ -307,6 +332,8 @@ public class CacheConfig {
     this.prefetchOnOpen = prefetchOnOpen;
     this.cacheDataInL1 = cacheDataInL1;
     this.dropBehindCompaction = dropBehindCompaction;
+    this.cacheRootIndexOnlyOnRead = cacheRootIndexOnRead;
+    this.cacheRootIndexOnlyOnWrite = cacheRootIndexOnWrite;
   }
 
   /**
@@ -318,7 +345,8 @@ public class CacheConfig {
         cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite,
         cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose,
         cacheConf.cacheDataCompressed, cacheConf.prefetchOnOpen,
-        cacheConf.cacheDataInL1, cacheConf.dropBehindCompaction);
+        cacheConf.cacheDataInL1, cacheConf.dropBehindCompaction,
+        cacheConf.cacheRootIndexOnlyOnRead, cacheConf.cacheRootIndexOnlyOnWrite);
   }
 
   /**
@@ -365,6 +393,17 @@ public class CacheConfig {
   }
 
   /**
+   * If cacheRootIndexOnlyOnRead is true, only the ROOT_INDEX block is cached on read.
+   */
+  public boolean shouldCacheBlockOnRead(BlockType blockType) {
+    if (cacheRootIndexOnlyOnRead) {
+      return isBlockCacheEnabled() && blockType == BlockType.ROOT_INDEX;
+    } else {
+      return shouldCacheBlockOnRead(blockType.getCategory());
+    }
+  }
+
+  /**
    * @return true if blocks in this file should be flagged as in-memory
    */
   public boolean isInMemory() {
@@ -383,6 +422,9 @@ public class CacheConfig {
    * written, false if not
    */
   public boolean shouldCacheDataOnWrite() {
+    if (cacheRootIndexOnlyOnWrite) {
+      return false;
+    }
     return isBlockCacheEnabled() && this.cacheDataOnWrite;
   }
 
@@ -411,6 +453,9 @@ public class CacheConfig {
    * is written, false if not
    */
   public boolean shouldCacheIndexesOnWrite() {
+    if (cacheRootIndexOnlyOnWrite) {
+      return false;
+    }
     return isBlockCacheEnabled() && this.cacheIndexesOnWrite;
   }
 
@@ -419,10 +464,20 @@ public class CacheConfig {
    * is written, false if not
    */
   public boolean shouldCacheBloomsOnWrite() {
+    if (cacheRootIndexOnlyOnWrite) {
+      return false;
+    }
     return isBlockCacheEnabled() && this.cacheBloomsOnWrite;
   }
 
   /**
+   * @return true if only the root index block should be cached on write, false if not
+   */
+  public boolean shouldCacheRootIndexOnlyOnWrite() {
+    return isBlockCacheEnabled() && this.cacheRootIndexOnlyOnWrite;
+  }
+
+  /**
    * @return true if blocks should be evicted from the cache when an HFile
    *         reader is closed, false if not
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index efc9a30..c2d9fa8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1180,6 +1180,10 @@ public class HFileBlock implements Cacheable {
       return uncompressedBlockBytesWithHeader.length;
     }
 
+    public BlockType getBlockType() {
+      return blockType;
+    }
+
     /** @return true if a block is being written */
     boolean isWriting() {
       return state == State.WRITING;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 76fec06..22c2e9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -1072,6 +1072,11 @@ public class HFileBlockIndex {
       if (midKeyMetadata != null)
         blockStream.write(midKeyMetadata);
       blockWriter.writeHeaderAndData(out);
+      if (cacheConf != null && cacheConf.shouldCacheRootIndexOnlyOnWrite()) {
+        HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
+        cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching,
+          rootLevelIndexPos, true, blockForCaching.getBlockType()), blockForCaching);
+      }
     }
 
     // Add root index block size
@@ -1167,7 +1172,7 @@ public class HFileBlockIndex {
       byte[] curFirstKey = curChunk.getBlockKey(0);
       blockWriter.writeHeaderAndData(out);
 
-      if (cacheConf != null) {
+      if (cacheConf != null && cacheConf.shouldCacheIndexesOnWrite()) {
         HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
         cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching,
           beginOffset, true, blockForCaching.getBlockType()), blockForCaching);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index fc1c04e..2ca3db4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1367,7 +1367,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     BlockCacheKey cacheKey = new BlockCacheKey(name, metaBlockOffset,
       this.isPrimaryReplicaReader(), BlockType.META);
 
-    cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory());
+    cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META);
     if (cacheConf.isBlockCacheEnabled()) {
       HFileBlock cachedBlock =
         getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null);
@@ -1471,12 +1471,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
           fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread);
       validateBlockType(hfileBlock, expectedBlockType);
       HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
-      BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();
+      BlockType blockType = hfileBlock.getBlockType();
 
       // Cache the block if necessary
-      if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) {
+      if (cacheBlock && cacheConf.shouldCacheBlockOnRead(blockType)) {
         cacheConf.getBlockCache().cacheBlock(cacheKey,
-            cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked,
+            cacheConf.shouldCacheCompressed(blockType.getCategory()) ? hfileBlock : unpacked,
             cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1());
       }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index b9cbb16..bfc0f68 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -227,7 +227,7 @@ public class TestCacheOnWrite {
       new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
       cowType.shouldBeCached(BlockType.LEAF_INDEX),
       cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
-      false, false, false);
+      false, false, false, false, false);
   }
 
   @After
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheRootIndexOnly.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheRootIndexOnly.java
new file mode 100644
index 0000000..3ef7a05
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheRootIndexOnly.java
@@ -0,0 +1,278 @@
+package org.apache.hadoop.hbase.io.hfile;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ChecksumType;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test caching only the ROOT_INDEX block on read and write.
+ */
+@RunWith(Parameterized.class)
+@Category({IOTests.class, MediumTests.class})
+public class TestCacheRootIndexOnly {
+
+  private static final Log LOG = LogFactory.getLog(TestCacheRootIndexOnly.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
+  private Configuration conf;
+  private CacheConfig cacheConf;
+  private FileSystem fs;
+  private Random rand = new Random(12983177L);
+  private Path storeFilePath;
+  private BlockCache blockCache;
+  private String testDescription;
+
+  private final CacheOnWriteType cowType;
+  private final Compression.Algorithm compress;
+  private final boolean cacheCompressedData;
+
+  private static final int DATA_BLOCK_SIZE = 2048;
+  private static final int NUM_KV = 25000;
+  private static final int INDEX_BLOCK_SIZE = 512;
+  private static final int BLOOM_BLOCK_SIZE = 4096;
+  private static final BloomType BLOOM_TYPE = BloomType.ROWCOL;
+  private static final int CKBYTES = 512;
+
+  /** The number of valid key types possible in a store file */
+  private static final int NUM_VALID_KEY_TYPES =
+      KeyValue.Type.values().length - 2;
+
+  private static enum CacheOnWriteType {
+    DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
+        BlockType.DATA, BlockType.ENCODED_DATA),
+    BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
+        BlockType.BLOOM_CHUNK),
+    INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
+        BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);
+
+    private final String confKey;
+    private final BlockType blockType1;
+    private final BlockType blockType2;
+
+    private CacheOnWriteType(String confKey, BlockType blockType) {
+      this(confKey, blockType, blockType);
+    }
+
+    private CacheOnWriteType(String confKey, BlockType blockType1,
+        BlockType blockType2) {
+      this.blockType1 = blockType1;
+      this.blockType2 = blockType2;
+      this.confKey = confKey;
+    }
+
+    public boolean shouldBeCached(BlockType blockType) {
+      return blockType == blockType1 || blockType == blockType2;
+    }
+
+    public void modifyConf(Configuration conf) {
+      for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
+        conf.setBoolean(cowType.confKey, cowType == this);
+      }
+    }
+  }
+
+  public TestCacheRootIndexOnly(CacheOnWriteType cowType, Compression.Algorithm compress,
+      boolean cacheCompressedData, BlockCache blockCache) {
+    this.cowType = cowType;
+    this.compress = compress;
+    this.cacheCompressedData = cacheCompressedData;
+    this.blockCache = blockCache;
+    testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress +
+        ", cacheCompressedData=" + cacheCompressedData + "]";
+    LOG.info(testDescription);
+  }
+
+  private static List<BlockCache> getBlockCaches() throws IOException {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    List<BlockCache> blockcaches = new ArrayList<>();
+    // default
+    blockcaches.add(new CacheConfig(conf).getBlockCache());
+
+    // memory
+    BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
+    blockcaches.add(lru);
+
+    // bucket cache
+    FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
+    int[] bucketSizes =
+        { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 };
+    BlockCache bucketcache =
+        new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
+    blockcaches.add(bucketcache);
+    return blockcaches;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> getParameters() throws IOException {
+    List<Object[]> params = new ArrayList<>();
+    for (BlockCache blockCache : getBlockCaches()) {
+      for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
+        for (Compression.Algorithm compress : HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
+          for (boolean cacheCompressedData : new boolean[] { false, true }) {
+            params.add(new Object[] { cowType, compress, cacheCompressedData, blockCache });
+          }
+        }
+      }
+    }
+    return params;
+  }
+
+  private void clearBlockCache(BlockCache blockCache) throws InterruptedException {
+    if (blockCache instanceof LruBlockCache) {
+      ((LruBlockCache) blockCache).clearCache();
+    } else {
+      // BucketCache may not return all cached blocks (blocks still in the write queue), so retry here.
+      for (int clearCount = 0; blockCache.getBlockCount() > 0; clearCount++) {
+        if (clearCount > 0) {
+          LOG.warn("clear block cache " + blockCache + " " + clearCount + " times, " +
+              blockCache.getBlockCount() + " blocks remaining");
+          Thread.sleep(10);
+        }
+        for (CachedBlock block : Lists.newArrayList(blockCache)) {
+          BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
+          // CombinedBucketCache may need to evict a block twice.
+          for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) {
+            if (evictCount > 1) {
+              LOG.warn("evict block " + block + " in " + blockCache + " " + evictCount +
+                  " times, maybe a bug here");
+            }
+          }
+        }
+      }
+    }
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    conf = TEST_UTIL.getConfiguration();
+    this.conf.set("dfs.datanode.data.dir.perm", "700");
+    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
+    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
+        BLOOM_BLOCK_SIZE);
+    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
+    cowType.modifyConf(conf);
+    fs = HFileSystem.get(conf);
+    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
+    cacheConf =
+        new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
+            cowType.shouldBeCached(BlockType.LEAF_INDEX),
+            cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
+            false, false, false, true, true);
+  }
+
+  @After
+  public void tearDown() throws IOException, InterruptedException {
+    clearBlockCache(blockCache);
+  }
+
+  @AfterClass
+  public static void afterClass() throws IOException {
+    TEST_UTIL.cleanupTestDir();
+  }
+
+  private void testCacheRootIndexOnlyInternals() throws IOException {
+    writeStoreFile();
+    readStoreFile();
+  }
+
+  private void readStoreFile() throws IOException {
+    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf);
+    LOG.info("HFile information: " + reader);
+    long offset = 0;
+    DataBlockEncoding encodingInCache = NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding();
+    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
+      // Flags: cache the block, use pread, this is not a compaction.
+      // Also, pass null for expected block type to avoid checking it.
+      HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null,
+          encodingInCache);
+      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
+          offset);
+      HFileBlock fromCache = (HFileBlock) blockCache.getBlock(blockCacheKey, true, false, true);
+      boolean isCached = fromCache != null;
+      if (isCached) {
+        assertEquals(BlockType.ROOT_INDEX, block.getBlockType());
+      }
+      offset += block.getOnDiskSizeWithHeader();
+    }
+    reader.close();
+  }
+
+  public static KeyValue.Type generateKeyType(Random rand) {
+    if (rand.nextBoolean()) {
+      // Let's make half of KVs puts.
+      return KeyValue.Type.Put;
+    } else {
+      KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
+      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
+        throw new RuntimeException("Generated an invalid key type: " + keyType + ". " +
+            "Probably the layout of KeyValue.Type has changed.");
+      }
+      return keyType;
+    }
+  }
+
+  private void writeStoreFile() throws IOException {
+    Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
+        "test_cache_on_write");
+    HFileContext meta = new HFileContextBuilder().withCompression(compress)
+        .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
+        .withBlockSize(DATA_BLOCK_SIZE)
+        .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
+        .withIncludesTags(false).build();
+    StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
+        .withOutputDir(storeFileParentDir).withComparator(CellComparator.COMPARATOR)
+        .withFileContext(meta)
+        .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build();
+    byte[] cf = Bytes.toBytes("fam");
+    for (int i = 0; i < NUM_KV; ++i) {
+      byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i);
+      byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand);
+      byte[] value = RandomKeyValueUtil.randomValue(rand);
+      KeyValue kv =
+          new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length,
+              rand.nextLong(), generateKeyType(rand), value, 0, value.length);
+      sfw.append(kv);
+    }
+
+    sfw.close();
+    storeFilePath = sfw.getPath();
+  }
+
+  @Test
+  public void testCacheRootIndexOnly() throws IOException {
+    testCacheRootIndexOnlyInternals();
+  }
+
+}
-- 
1.9.3 (Apple Git-50)
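
Usage note (a minimal sketch, not part of the diff above): the feature is driven by
the two boolean keys this patch adds to CacheConfig. Assuming the patch is applied,
enabling root-index-only caching programmatically would look roughly like the
following; the surrounding setup is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    Configuration conf = HBaseConfiguration.create();
    // Cache only the ROOT_INDEX block when blocks are read.
    conf.setBoolean(CacheConfig.CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_READ_KEY, true);
    // Cache only the ROOT_INDEX block when new HFiles are written.
    conf.setBoolean(CacheConfig.CACHE_ROOT_INDEX_BLOCKS_ONLY_ON_WRITE_KEY, true);
    // Derive a CacheConfig through the same constructor path the patch extends.
    CacheConfig cacheConf = new CacheConfig(conf);

The same two properties (hfile.block.rootindex.only.cacheonread and
hfile.block.rootindex.only.cacheonwrite) can instead be set in hbase-site.xml.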