Index: src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (revision 1176491)
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (working copy)
@@ -173,7 +173,7 @@
     int numBlocksRead = 0;
     long pos = 0;
     while (pos < totalSize) {
-      b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, pread);
+      b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, true, pread);
       b.sanityCheck();
       pos += block.length;
       numBlocksRead++;
@@ -205,7 +205,7 @@
       FSDataInputStream is = fs.open(path);
       HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
           totalSize);
-      HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
+      HFileBlock b = hbr.readBlockData(0, -1, -1, true, pread);
       is.close();
 
       b.sanityCheck();
@@ -216,12 +216,12 @@
       if (algo == GZ) {
         is = fs.open(path);
         hbr = new HFileBlock.FSReaderV2(is, algo, totalSize);
-        b = hbr.readBlockData(0, 2173 + HFileBlock.HEADER_SIZE, -1, pread);
+        b = hbr.readBlockData(0, 2173 + HFileBlock.HEADER_SIZE, -1, true, pread);
         assertEquals(blockStr, b.toString());
         int wrongCompressedSize = 2172;
         try {
           b = hbr.readBlockData(0, wrongCompressedSize
-              + HFileBlock.HEADER_SIZE, -1, pread);
+              + HFileBlock.HEADER_SIZE, -1, true, pread);
           fail("Exception expected");
         } catch (IOException ex) {
           String expectedPrefix = "On-disk size without header provided is "
@@ -266,7 +266,7 @@
       assertEquals(expectedOffsets.get(i).longValue(), curOffset);
 
       LOG.info("Reading block #" + i + " at offset " + curOffset);
-      HFileBlock b = hbr.readBlockData(curOffset, -1, -1, pread);
+      HFileBlock b = hbr.readBlockData(curOffset, -1, -1, true, pread);
       LOG.info("Block #" + i + ": " + b);
       assertEquals("Invalid block #" + i + "'s type:", expectedTypes.get(i),
           b.getBlockType());
@@ -279,7 +279,7 @@
       // Now re-load this block knowing the on-disk size. This tests a
       // different branch in the loader.
       HFileBlock b2 = hbr.readBlockData(curOffset,
-          b.getOnDiskSizeWithHeader(), -1, pread);
+          b.getOnDiskSizeWithHeader(), -1, true, pread);
       b2.sanityCheck();
 
       assertEquals(b.getBlockType(), b2.getBlockType());
@@ -370,7 +370,7 @@
         HFileBlock b;
         try {
           long onDiskSizeArg = withOnDiskSize ? expectedSize : -1;
-          b = hbr.readBlockData(offset, onDiskSizeArg, -1, pread);
+          b = hbr.readBlockData(offset, onDiskSizeArg, -1, true, pread);
         } catch (IOException ex) {
           LOG.error("Error in client " + clientId + " trying to read block at "
               + offset + ", pread=" + pread + ", withOnDiskSize=" +
Index: src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java (revision 1176491)
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java (working copy)
@@ -164,7 +164,7 @@
     while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
       HFileBlock block = reader.readBlockData(offset, prevBlock == null ? -1
-          : prevBlock.getNextBlockOnDiskSizeWithHeader(), -1, false);
+          : prevBlock.getNextBlockOnDiskSizeWithHeader(), -1, true, false);
       String blockCacheKey = HFile.getBlockCacheKey(reader.getName(), offset);
       boolean isCached = blockCache.getBlock(blockCacheKey, true) != null;
       boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
Index: src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java (revision 1176491)
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java (working copy)
@@ -139,7 +139,7 @@
 
     @Override
    public HFileBlock readBlockData(long offset, long onDiskSize,
-        int uncompressedSize, boolean pread) throws IOException {
+        int uncompressedSize, boolean cacheBlock, boolean pread) throws IOException {
       if (offset == prevOffset && onDiskSize == prevOnDiskSize &&
           uncompressedSize == prevUncompressedSize && pread == prevPread) {
         hitCount += 1;
@@ -148,7 +148,7 @@
       missCount += 1;
 
       prevBlock = realReader.readBlockData(offset, onDiskSize,
-          uncompressedSize, pread);
+          uncompressedSize, cacheBlock, pread);
       prevOffset = offset;
       prevOnDiskSize = onDiskSize;
       prevUncompressedSize = uncompressedSize;
@@ -182,7 +182,7 @@
     for (byte[] key : keys) {
       assertTrue(key != null);
       assertTrue(indexReader != null);
-      HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null);
+      HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, true, null);
       if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
         assertTrue(b == null);
         ++i;
Index: src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java (revision 1176491)
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java (working copy)
@@ -124,7 +124,7 @@
     fsdis.seek(0);
     long curBlockPos = 0;
     while (curBlockPos <= trailer.getLastDataBlockOffset()) {
-      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
+      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, true, false);
       assertEquals(BlockType.DATA, block.getBlockType());
       ByteBuffer buf = block.getBufferWithoutHeader();
       while (buf.hasRemaining()) {
@@ -158,7 +158,7 @@
     while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
       LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " +
           trailer.getLoadOnOpenDataOffset());
-      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
+      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, true, false);
       assertEquals(BlockType.META, block.getBlockType());
       Text t = new Text();
       block.readInto(t);
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (revision 1176491)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (working copy)
@@ -938,7 +938,7 @@
      * @return the newly read block
      */
     HFileBlock readBlockData(long offset, long onDiskSize,
-        int uncompressedSize, boolean pread) throws IOException;
+        int uncompressedSize, boolean cacheBlock, boolean pread) throws IOException;
   }
 
   /** A full-fledged reader with an iteration ability. */
@@ -991,7 +991,7 @@
       public HFileBlock nextBlock() throws IOException {
         if (offset >= endOffset)
           return null;
-        HFileBlock b = readBlockData(offset, -1, -1, false);
+        HFileBlock b = readBlockData(offset, -1, -1, true, false);
         offset += b.getOnDiskSizeWithHeader();
         return b;
       }
@@ -1169,7 +1169,7 @@
      */
     @Override
     public HFileBlock readBlockData(long offset, long onDiskSizeWithMagic,
-        int uncompressedSizeWithMagic, boolean pread) throws IOException {
+        int uncompressedSizeWithMagic, boolean cacheBlock, boolean pread) throws IOException {
       if (uncompressedSizeWithMagic <= 0) {
         throw new IOException("Invalid uncompressedSize="
             + uncompressedSizeWithMagic + " for a version 1 block");
@@ -1271,7 +1271,7 @@
      */
     @Override
     public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL,
-        int uncompressedSize, boolean pread) throws IOException {
+        int uncompressedSize, boolean cacheBlock, boolean pread) throws IOException {
       if (offset < 0) {
         throw new IOException("Invalid offset=" + offset + " trying to read "
             + "block (onDiskSize=" + onDiskSizeWithHeaderL
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java (revision 1176491)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java (working copy)
@@ -233,7 +233,7 @@
 
       HFileBlock hfileBlock = fsBlockReader.readBlockData(offset, nextOffset
           - offset, metaBlockIndexReader.getRootBlockDataSize(block),
-          true);
+          cacheBlock, true);
       hfileBlock.expectType(BlockType.META);
 
       HFile.readTimeNano.addAndGet(System.nanoTime() - startTimeNs);
@@ -304,7 +304,7 @@
       }
 
       HFileBlock hfileBlock = fsBlockReader.readBlockData(offset, nextOffset
-          - offset, dataBlockIndexReader.getRootBlockDataSize(block), pread);
+          - offset, dataBlockIndexReader.getRootBlockDataSize(block), cacheBlock, pread);
       hfileBlock.expectType(BlockType.DATA);
 
       ByteBuffer buf = hfileBlock.getBufferWithoutHeader();
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (revision 1176491)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (working copy)
@@ -188,7 +188,7 @@
       }
 
       HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset,
-          blockSize, -1, true);
+          blockSize, -1, cacheBlock, true);
 
       HFile.readTimeNano.addAndGet(System.nanoTime() - startTimeNs);
       HFile.readOps.incrementAndGet();
@@ -210,13 +210,14 @@
    */
   @Override
   public HFileBlock readBlockData(long offset, long onDiskSize,
-      int uncompressedSize, boolean pread) throws IOException {
+      int uncompressedSize, boolean cacheBlock, boolean pread)
+      throws IOException {
     if (onDiskSize >= Integer.MAX_VALUE) {
       throw new IOException("Invalid on-disk size: " + onDiskSize);
     }
 
     // Assuming we are not doing a compaction.
-    return readBlock(offset, (int) onDiskSize, true, pread, false);
+    return readBlock(offset, (int) onDiskSize, cacheBlock, pread, false);
   }
 
   /**
@@ -269,7 +270,7 @@
       // Load block from filesystem.
       long startTimeNs = System.nanoTime();
       HFileBlock dataBlock = fsBlockReader.readBlockData(dataBlockOffset,
-          onDiskBlockSize, -1, pread);
+          onDiskBlockSize, -1, cacheBlock, pread);
 
       HFile.readTimeNano.addAndGet(System.nanoTime() - startTimeNs);
       HFile.readOps.incrementAndGet();
@@ -500,7 +501,7 @@
         throws IOException {
      HFileBlock seekToBlock = ((HFileReaderV2) reader).getDataBlockIndexReader().seekToDataBlock(
-          key, offset, length, block);
+          key, offset, length, cacheBlocks, block);
       if (seekToBlock == null) {
         // This happens if the key e.g. falls before the beginning of the file.
         return -1;
       }
@@ -668,7 +669,7 @@
       HFileReaderV2 reader2 = (HFileReaderV2) reader;
 
       HFileBlock seekToBlock = reader2.getDataBlockIndexReader().seekToDataBlock(
-          key, offset, length, block);
+          key, offset, length, cacheBlocks, block);
       if (seekToBlock == null) {
         return false;
       }
@@ -687,7 +688,8 @@
       // reader so that it does not have to read the header separately to
       // figure out the size.
       seekToBlock = reader2.fsBlockReader.readBlockData(previousBlockOffset,
-          seekToBlock.getOffset() - previousBlockOffset, -1, pread);
+          seekToBlock.getOffset() - previousBlockOffset, -1, cacheBlocks,
+          pread);
 
       // TODO shortcut: seek forward in this block to the last key of the
       // block.
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java (revision 1176491)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java (working copy)
@@ -176,7 +176,7 @@
      * @throws IOException
      */
     public HFileBlock seekToDataBlock(final byte[] key, int keyOffset,
-        int keyLength, HFileBlock currentBlock)
+        int keyLength, boolean cacheBlock, HFileBlock currentBlock)
         throws IOException {
       int rootLevelIndex = rootBlockContainingKey(key, keyOffset, keyLength);
       if (rootLevelIndex < 0 || rootLevelIndex >= blockOffsets.length) {
@@ -189,7 +189,7 @@
       int lookupLevel = 1; // How many levels deep we are in our lookup.
       HFileBlock block = blockReader.readBlockData(currentOffset,
-          currentOnDiskSize, -1, true);
+          currentOnDiskSize, -1, cacheBlock, true);
       if (block == null) {
         throw new IOException("Failed to read block at offset " +
             currentOffset + ", onDiskSize=" + currentOnDiskSize);
       }
@@ -224,7 +224,7 @@
         block = currentBlock;
       } else {
         block = blockReader.readBlockData(currentOffset, currentOnDiskSize,
-            -1, true);
+            -1, cacheBlock, true);
       }
     }
 
@@ -257,7 +257,7 @@
             "no block reader available");
       }
       HFileBlock midLeafBlock = blockReader.readBlockData(
-          midLeafBlockOffset, midLeafBlockOnDiskSize, -1, true);
+          midLeafBlockOffset, midLeafBlockOnDiskSize, -1, true, true);
       ByteBuffer b = midLeafBlock.getBufferWithoutHeader();
       int numDataBlocks = b.getInt();
       int keyRelOffset = b.getInt(Bytes.SIZEOF_INT * (midKeyEntry + 1));