From 6d9d6e7d87e2491652fe1fb06052a4757680a1ce Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Wed, 11 Jun 2014 13:55:49 -0700 Subject: [PATCH] HBASE-11331 [blockcache] lazy block decompression --- .../apache/hadoop/hbase/io/hfile/HFileBlock.java | 326 ++++++++++++--------- .../hadoop/hbase/io/hfile/HFileReaderV2.java | 16 +- .../apache/hadoop/hbase/io/hfile/TestChecksum.java | 11 +- .../hadoop/hbase/io/hfile/TestHFileBlock.java | 18 +- .../io/hfile/TestHFileBlockCompatibility.java | 7 +- .../hadoop/hbase/io/hfile/TestHFileEncryption.java | 5 +- .../hadoop/hbase/io/hfile/TestHFileWriterV2.java | 15 +- .../hadoop/hbase/io/hfile/TestHFileWriterV3.java | 15 +- 8 files changed, 232 insertions(+), 181 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index b39eec2..84378a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -64,25 +64,26 @@ import com.google.common.base.Preconditions; * information from the block index are required to read a block. *
  • In version 2 a block is structured as follows: * * - * The version 2 block representation in the block cache is the same as above, - * except that the data section is always uncompressed in the cache. */ @InterfaceAudience.Private public class HFileBlock implements Cacheable { @@ -111,7 +112,7 @@ public class HFileBlock implements Cacheable { ByteBuffer.wrap(new byte[0], 0, 0).getClass(), false); // meta.usesHBaseChecksum+offset+nextBlockOnDiskSizeWithHeader - public static final int EXTRA_SERIALIZATION_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT + public static final int EXTRA_SERIALIZATION_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG; /** @@ -155,23 +156,25 @@ public class HFileBlock implements Cacheable { .registerDeserializer(blockDeserializer); } + /** Type of block. Header field 0. */ private BlockType blockType; - /** Size on disk without the header. It includes checksum data too. */ + /** Size on disk excluding header, including checksum. Header field 1. */ private int onDiskSizeWithoutHeader; - /** Size of pure data. Does not include header or checksums */ + /** Size of pure data. Does not include header or checksums. Header field 2. */ private final int uncompressedSizeWithoutHeader; - /** The offset of the previous block on disk */ + /** The offset of the previous block on disk. Header field 3. */ private final long prevBlockOffset; - /** Size on disk of header and data. Does not include checksum data */ + /** Size on disk of header + data. Excludes checksum. Header field 6. */ private final int onDiskDataSizeWithHeader; /** The in-memory representation of the hfile block */ private ByteBuffer buf; - /** Meta data that holds meta information on the hfileblock**/ + + /** Meta data that holds meta information on the hfileblock */ private HFileContext fileContext; /** @@ -193,27 +196,18 @@ public class HFileBlock implements Cacheable { * and is sitting in a byte buffer. * * @param blockType the type of this block, see {@link BlockType} - * @param onDiskSizeWithoutHeader compressed size of the block if compression - * is used, otherwise uncompressed size, header size not included - * @param uncompressedSizeWithoutHeader uncompressed size of the block, - * header size not included. Equals onDiskSizeWithoutHeader if - * compression is disabled. - * @param prevBlockOffset the offset of the previous block in the - * {@link HFile} + * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader} + * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader} + * @param prevBlockOffset see {@link #prevBlockOffset} * @param buf block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) followed by * uncompressed data. This - * @param fillHeader true to fill in the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of - * the buffer based on the header fields provided + * @param fillHeader when true, parse {@code buf} and override the first 4 header fields. 
* @param offset the file offset the block was read from - * @param bytesPerChecksum the number of bytes per checksum chunk - * @param checksumType the checksum algorithm to use - * @param onDiskDataSizeWithHeader size of header and data on disk not - * including checksum data + * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader} * @param fileContext HFile meta data */ - HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer buf, - boolean fillHeader, long offset, + HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader, + long prevBlockOffset, ByteBuffer buf, boolean fillHeader, long offset, int onDiskDataSizeWithHeader, HFileContext fileContext) { this.blockType = blockType; this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader; @@ -228,6 +222,21 @@ public class HFileBlock implements Cacheable { } /** + * Copy constructor. Creates a shallow copy of {@code that}'s buffer. + */ + HFileBlock(HFileBlock that) { + this.blockType = that.blockType; + this.onDiskSizeWithoutHeader = that.onDiskSizeWithoutHeader; + this.uncompressedSizeWithoutHeader = that.uncompressedSizeWithoutHeader; + this.prevBlockOffset = that.prevBlockOffset; + this.buf = that.buf.duplicate(); + this.offset = that.offset; + this.onDiskDataSizeWithHeader = that.onDiskDataSizeWithHeader; + this.fileContext = that.fileContext; + this.nextBlockOnDiskSizeWithHeader = that.nextBlockOnDiskSizeWithHeader; + } + + /** * Creates a block from an existing buffer starting with a header. Rewinds * and takes ownership of the buffer. By definition of rewind, ignores the * buffer position, but if you slice the buffer beforehand, it will rewind @@ -272,28 +281,21 @@ public class HFileBlock implements Cacheable { } /** - * @return the on-disk size of the block with header size included. This - * includes the header, the data and the checksum data. + * @return the on-disk size of header + data part + checksum. */ public int getOnDiskSizeWithHeader() { return onDiskSizeWithoutHeader + headerSize(); } /** - * Returns the size of the compressed part of the block in case compression - * is used, or the uncompressed size of the data part otherwise. Header size - * and checksum data size is not included. - * - * @return the on-disk size of the data part of the block, header and - * checksum not included. + * @return the on-disk size of the data part + checksum (header excluded). */ public int getOnDiskSizeWithoutHeader() { return onDiskSizeWithoutHeader; } /** - * @return the uncompressed size of the data part of the block, header not - * included + * @return the uncompressed size of data part (header and checksum excluded). */ public int getUncompressedSizeWithoutHeader() { return uncompressedSizeWithoutHeader; @@ -308,8 +310,8 @@ public class HFileBlock implements Cacheable { } /** - * Writes header fields into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the - * buffer. Resets the buffer position to the end of header as side effect. + * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position + * is modified as side-effect. */ private void overwriteHeader() { buf.rewind(); @@ -320,11 +322,9 @@ public class HFileBlock implements Cacheable { } /** - * Returns a buffer that does not include the header. The array offset points - * to the start of the block data right after the header. The underlying data - * array is not copied. Checksum data is not included in the returned buffer. 
+ * Returns a buffer that does not include the header or checksum. * - * @return the buffer with header skipped + * @return the buffer with header skipped and checksum omitted. */ public ByteBuffer getBufferWithoutHeader() { return ByteBuffer.wrap(buf.array(), buf.arrayOffset() + headerSize(), @@ -336,7 +336,7 @@ public class HFileBlock implements Cacheable { * modify the buffer object. This method has to be public because it is * used in {@link CompoundBloomFilter} to avoid object creation on every * Bloom filter lookup, but has to be used with caution. Checksum data - * is not included in the returned buffer. + * is not included in the returned buffer but header data is. * * @return the buffer of this block for read-only operations */ @@ -350,17 +350,17 @@ public class HFileBlock implements Cacheable { * not modify the buffer object. This method has to be public because it is * used in {@link BucketCache} to avoid buffer copy. * - * @return the byte buffer with header included for read-only operations + * @return the buffer with header and checksum included for read-only operations */ public ByteBuffer getBufferReadOnlyWithHeader() { return ByteBuffer.wrap(buf.array(), buf.arrayOffset(), buf.limit()).slice(); } /** - * Returns a byte buffer of this block, including header data, positioned at + * Returns a byte buffer of this block, including header data and checksum, positioned at * the beginning of header. The underlying data array is not copied. * - * @return the byte buffer with header included + * @return the byte buffer with header and checksum included */ ByteBuffer getBufferWithHeader() { ByteBuffer dupBuf = buf.duplicate(); @@ -376,22 +376,25 @@ public class HFileBlock implements Cacheable { } } + private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromField) + throws IOException { + if (valueFromBuf != valueFromField) { + throw new IOException("Block type stored in the buffer: " + + valueFromBuf + ", block type field: " + valueFromField); + } + } + /** * Checks if the block is internally consistent, i.e. the first - * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a valid header consistent - * with the fields. This function is primary for testing and debugging, and - * is not thread-safe, because it alters the internal buffer pointer. + * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a + * valid header consistent with the fields. Assumes a packed block structure. + * This function is primary for testing and debugging, and is not + * thread-safe, because it alters the internal buffer pointer. 
*/ void sanityCheck() throws IOException { buf.rewind(); - { - BlockType blockTypeFromBuf = BlockType.read(buf); - if (blockTypeFromBuf != blockType) { - throw new IOException("Block type stored in the buffer: " + - blockTypeFromBuf + ", block type field: " + blockType); - } - } + sanityCheckAssertion(BlockType.read(buf), blockType); sanityCheckAssertion(buf.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader"); @@ -403,26 +406,23 @@ public class HFileBlock implements Cacheable { if (this.fileContext.isUseHBaseChecksum()) { sanityCheckAssertion(buf.get(), this.fileContext.getChecksumType().getCode(), "checksumType"); sanityCheckAssertion(buf.getInt(), this.fileContext.getBytesPerChecksum(), "bytesPerChecksum"); - sanityCheckAssertion(buf.getInt(), onDiskDataSizeWithHeader, - "onDiskDataSizeWithHeader"); + sanityCheckAssertion(buf.getInt(), onDiskDataSizeWithHeader, "onDiskDataSizeWithHeader"); } int cksumBytes = totalChecksumBytes(); - int hdrSize = headerSize(); - int expectedBufLimit = uncompressedSizeWithoutHeader + headerSize() + - cksumBytes; + int expectedBufLimit = onDiskDataSizeWithHeader + cksumBytes; if (buf.limit() != expectedBufLimit) { throw new AssertionError("Expected buffer limit " + expectedBufLimit + ", got " + buf.limit()); } // We might optionally allocate HFILEBLOCK_HEADER_SIZE more bytes to read the next - // block's, header, so there are two sensible values for buffer capacity. - int size = uncompressedSizeWithoutHeader + hdrSize + cksumBytes; - if (buf.capacity() != size && - buf.capacity() != size + hdrSize) { + // block's header, so there are two sensible values for buffer capacity. + int hdrSize = headerSize(); + if (buf.capacity() != expectedBufLimit && + buf.capacity() != expectedBufLimit + hdrSize) { throw new AssertionError("Invalid buffer capacity: " + buf.capacity() + - ", expected " + size + " or " + (size + hdrSize)); + ", expected " + expectedBufLimit + " or " + (expectedBufLimit + hdrSize)); } } @@ -457,6 +457,61 @@ public class HFileBlock implements Cacheable { } /** + * @return true when on-disk blocks from this file are compressed, and/or encrypted; + * false otherwise. + */ + private static boolean isCompressedOrEncrypted(HFileContext fileContext) { + Algorithm compressAlgo = fileContext.getCompression(); + boolean isCompressed = + compressAlgo != null + && compressAlgo != Compression.Algorithm.NONE; + + Encryption.Context cryptoContext = fileContext.getEncryptionContext(); + boolean isEncrypted = cryptoContext != null + && cryptoContext != Encryption.Context.NONE; + + return isCompressed || isEncrypted; + } + + /** + * Retrieves the decompressed/decrypted view of this block. Internal structures are shared + * between instances where applicable. + */ + HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException { + if (!isCompressedOrEncrypted(fileContext)) { + // cannot use our own fileContext here because HFileBlock(ByteBuffer, boolean) does not + // preserve encoding and encryption details. + return this; + } + + HFileBlock unpacked = new HFileBlock(this); + unpacked.allocateBuffer(); // allocates space for the decompressed block + + HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA ? 
+ reader.getBlockDecodingContext() : reader.getDefaultBlockDecodingContext(); + ctx.prepareDecoding(unpacked.getOnDiskSizeWithoutHeader(), + unpacked.getUncompressedSizeWithoutHeader(), unpacked.getBufferWithoutHeader(), + this.getBufferReadOnlyWithHeader().array(), this.headerSize()); + + if (unpacked.nextBlockOnDiskSizeWithHeader > 0) { + // Preserve the next block's header bytes in the new block if we have them. + System.arraycopy(this.buf.array(), this.buf.arrayOffset() + this.onDiskDataSizeWithHeader, + unpacked.buf.array(), unpacked.buf.arrayOffset() + unpacked.headerSize() + + unpacked.uncompressedSizeWithoutHeader + unpacked.totalChecksumBytes(), + unpacked.headerSize()); + } + return unpacked; + } + + /** + * Allocates a new buffer of the correct size for decoded data. + * @see #allocateBuffer(boolean) + */ + private void allocateBuffer() { + allocateBuffer(this.nextBlockOnDiskSizeWithHeader > 0); + } + + /** * Always allocates a new buffer of the correct size. Copies header bytes * from the existing buffer. Does not change header fields. * Reserve room to keep checksum bytes too. @@ -466,18 +521,19 @@ public class HFileBlock implements Cacheable { */ private void allocateBuffer(boolean extraBytes) { int cksumBytes = totalChecksumBytes(); - int capacityNeeded = headerSize() + uncompressedSizeWithoutHeader + - cksumBytes + - (extraBytes ? headerSize() : 0); + int headerSize = headerSize(); + int capacityNeeded = headerSize + uncompressedSizeWithoutHeader + + cksumBytes + (extraBytes ? headerSize : 0); ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded); // Copy header bytes. System.arraycopy(buf.array(), buf.arrayOffset(), newBuf.array(), - newBuf.arrayOffset(), headerSize()); + newBuf.arrayOffset(), headerSize); buf = newBuf; - buf.limit(headerSize() + uncompressedSizeWithoutHeader + cksumBytes); + // set limit to exclude next block's header + buf.limit(headerSize + uncompressedSizeWithoutHeader + cksumBytes); } /** An additional sanity-check in case no compression is being used. */ @@ -512,7 +568,7 @@ public class HFileBlock implements Cacheable { } /** - * @return a byte stream reading the data section of this block + * @return a byte stream reading the data + checksum of this block */ public DataInputStream getByteStream() { return new DataInputStream(new ByteArrayInputStream(buf.array(), @@ -588,7 +644,6 @@ public class HFileBlock implements Cacheable { return nextBlockOnDiskSizeWithHeader; } - /** * Unified version 2 {@link HFile} block writer. The intended usage pattern * is as follows: @@ -631,7 +686,7 @@ public class HFileBlock implements Cacheable { /** * Current block type. Set in {@link #startWriting(BlockType)}. Could be - * changed in {@link #encodeDataBlockForDisk()} from {@link BlockType#DATA} + * changed in {@link #finishBlock()} from {@link BlockType#DATA} * to {@link BlockType#ENCODED_DATA}. */ private BlockType blockType; @@ -648,8 +703,7 @@ public class HFileBlock implements Cacheable { /** * Bytes to be written to the file system, including the header. Compressed - * if compression is turned on. It also includes the checksum data that - * immediately follows the block data. (header + data + checksums) + * if compression is turned on. Does not include checksum bytes. (header + data) */ private byte[] onDiskBytesWithHeader; @@ -1119,6 +1173,12 @@ public class HFileBlock implements Cacheable { /** Closes the backing streams */ void closeStreams() throws IOException; + + /** Get a decoder for {@link BlockType#ENCODED_DATA} blocks from this file. 
*/ + HFileBlockDecodingContext getBlockDecodingContext(); + + /** Get the default decoder for blocks from this file. */ + HFileBlockDecodingContext getDefaultBlockDecodingContext(); } /** @@ -1276,7 +1336,8 @@ public class HFileBlock implements Cacheable { private HFileBlockDecodingContext encodedBlockDecodingCtx; - private HFileBlockDefaultDecodingContext defaultDecodingCtx; + /** Default context used when BlockType != {@link BlockType#ENCODED_DATA}. */ + private final HFileBlockDefaultDecodingContext defaultDecodingCtx; private ThreadLocal prefetchedHeaderForThread = new ThreadLocal() { @@ -1292,10 +1353,8 @@ public class HFileBlock implements Cacheable { this.streamWrapper = stream; // Older versions of HBase didn't support checksum. this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum()); - defaultDecodingCtx = - new HFileBlockDefaultDecodingContext(fileContext); - encodedBlockDecodingCtx = - new HFileBlockDefaultDecodingContext(fileContext); + defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext); + encodedBlockDecodingCtx = defaultDecodingCtx; } /** @@ -1436,9 +1495,8 @@ public class HFileBlock implements Cacheable { HFileBlock b = null; if (onDiskSizeWithHeader > 0) { - // We know the total on-disk size but not the uncompressed size. Read - // the entire block into memory, then parse the header and decompress - // from memory if using compression. This code path is used when + // We know the total on-disk size. Read the entire block into memory, + // then parse the header. This code path is used when // doing a random read operation relying on the block index, as well as // when the client knows the on-disk size from peeking into the next // block's header (e.g. this block's header) when reading the previous @@ -1446,7 +1504,8 @@ public class HFileBlock implements Cacheable { // Size that we have to skip in case we have already read the header. int preReadHeaderSize = headerBuf == null ? 0 : hdrSize; - onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize]; + onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize]; // room for this block plus the + // next block's header nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, preReadHeaderSize, onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread); @@ -1459,11 +1518,10 @@ public class HFileBlock implements Cacheable { headerBuf = ByteBuffer.wrap(onDiskBlock, 0, hdrSize); } // We know the total on-disk size but not the uncompressed size. Read - // the entire block into memory, then parse the header and decompress - // from memory if using compression. Here we have already read the - // block's header + // the entire block into memory, then parse the header. Here we have + // already read the block's header try { - b = new HFileBlock(headerBuf, this.fileContext.isUseHBaseChecksum()); + b = new HFileBlock(headerBuf, fileContext.isUseHBaseChecksum()); } catch (IOException ex) { // Seen in load testing. Provide comprehensive debug info. 
throw new IOException("Failed to read compressed block at " @@ -1501,61 +1559,29 @@ public class HFileBlock implements Cacheable { readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false, offset, pread); } - b = new HFileBlock(headerBuf, this.fileContext.isUseHBaseChecksum()); + b = new HFileBlock(headerBuf, fileContext.isUseHBaseChecksum()); onDiskBlock = new byte[b.getOnDiskSizeWithHeader() + hdrSize]; - System.arraycopy(headerBuf.array(), - headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize); + System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize); nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, hdrSize, b.getOnDiskSizeWithHeader() - hdrSize, true, offset + hdrSize, pread); onDiskSizeWithHeader = b.onDiskSizeWithoutHeader + hdrSize; } - Algorithm compressAlgo = fileContext.getCompression(); - boolean isCompressed = - compressAlgo != null - && compressAlgo != Compression.Algorithm.NONE; - - Encryption.Context cryptoContext = fileContext.getEncryptionContext(); - boolean isEncrypted = cryptoContext != null - && cryptoContext != Encryption.Context.NONE; - - if (!isCompressed && !isEncrypted) { + if (!isCompressedOrEncrypted(fileContext)) { b.assumeUncompressed(); } - if (verifyChecksum && - !validateBlockChecksum(b, onDiskBlock, hdrSize)) { + if (verifyChecksum && !validateBlockChecksum(b, onDiskBlock, hdrSize)) { return null; // checksum mismatch } - if (isCompressed || isEncrypted) { - // This will allocate a new buffer but keep header bytes. - b.allocateBuffer(nextBlockOnDiskSize > 0); - if (b.blockType == BlockType.ENCODED_DATA) { - encodedBlockDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(), - b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(), onDiskBlock, - hdrSize); - } else { - defaultDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(), - b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(), onDiskBlock, - hdrSize); - } - if (nextBlockOnDiskSize > 0) { - // Copy next block's header bytes into the new block if we have them. - System.arraycopy(onDiskBlock, onDiskSizeWithHeader, b.buf.array(), - b.buf.arrayOffset() + hdrSize - + b.uncompressedSizeWithoutHeader + b.totalChecksumBytes(), - hdrSize); - } - } else { - // The onDiskBlock will become the headerAndDataBuffer for this block. - // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already - // contains the header of next block, so no need to set next - // block's header in it. - b = new HFileBlock(ByteBuffer.wrap(onDiskBlock, 0, - onDiskSizeWithHeader), this.fileContext.isUseHBaseChecksum()); - } + // The onDiskBlock will become the headerAndDataBuffer for this block. + // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already + // contains the header of next block, so no need to set next + // block's header in it. + b = new HFileBlock(ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader), + this.fileContext.isUseHBaseChecksum()); b.nextBlockOnDiskSizeWithHeader = nextBlockOnDiskSize; @@ -1580,16 +1606,25 @@ public class HFileBlock implements Cacheable { encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(this.fileContext); } + @Override + public HFileBlockDecodingContext getBlockDecodingContext() { + return this.encodedBlockDecodingCtx; + } + + @Override + public HFileBlockDecodingContext getDefaultBlockDecodingContext() { + return this.defaultDecodingCtx; + } + /** * Generates the checksum for the header as well as the data and * then validates that it matches the value stored in the header. 
* If there is a checksum mismatch, then return false. Otherwise * return true. */ - protected boolean validateBlockChecksum(HFileBlock block, - byte[] data, int hdrSize) throws IOException { - return ChecksumUtil.validateBlockChecksum(path, block, - data, hdrSize); + protected boolean validateBlockChecksum(HFileBlock block, byte[] data, int hdrSize) + throws IOException { + return ChecksumUtil.validateBlockChecksum(path, block, data, hdrSize); } @Override @@ -1685,6 +1720,7 @@ public class HFileBlock implements Cacheable { return this.fileContext.getBytesPerChecksum(); } + /** @return the size of data on disk + header. Excludes checksum. */ int getOnDiskDataSizeWithHeader() { return this.onDiskDataSizeWithHeader; } @@ -1738,6 +1774,10 @@ public class HFileBlock implements Cacheable { return DUMMY_HEADER_NO_CHECKSUM; } + /** + * @return the HFileContext used to create this HFileBlock. Not necessary the + * fileContext for the file from which this block's data was originally read. + */ public HFileContext getHFileContext() { return this.fileContext; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index 209815a..e2dfcdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -146,17 +146,18 @@ public class HFileReaderV2 extends AbstractHFileReader { // Data index. We also read statistics about the block index written after // the root level. dataBlockIndexReader.readMultiLevelIndexRoot( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).unpack(hfileContext, fsBlockReader), trailer.getDataIndexCount()); // Meta index. metaBlockIndexReader.readRootIndex( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).unpack(hfileContext, fsBlockReader), trailer.getMetaIndexCount()); // File info fileInfo = new FileInfo(); - fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); + fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO) + .unpack(hfileContext, fsBlockReader).getByteStream()); lastKey = fileInfo.get(FileInfo.LASTKEY); avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN)); avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN)); @@ -177,7 +178,7 @@ public class HFileReaderV2 extends AbstractHFileReader { // Store all other load-on-open blocks for further consumption. 
HFileBlock b; while ((b = blockIter.nextBlock()) != null) { - loadOnOpenBlocks.add(b); + loadOnOpenBlocks.add(b.unpack(hfileContext, fsBlockReader)); } // Prefetch file blocks upon open if requested @@ -338,6 +339,7 @@ public class HFileReaderV2 extends AbstractHFileReader { HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, false, true, true, BlockType.META, null); if (cachedBlock != null) { + cachedBlock = cachedBlock.unpack(hfileContext, fsBlockReader); // Return a distinct 'shallow copy' of the block, // so pos does not get messed by the scanner return cachedBlock.getBufferWithoutHeader(); @@ -357,7 +359,7 @@ public class HFileReaderV2 extends AbstractHFileReader { cacheConf.isInMemory()); } - return metaBlock.getBufferWithoutHeader(); + return metaBlock.unpack(hfileContext, fsBlockReader).getBufferWithoutHeader(); } } @@ -436,7 +438,7 @@ public class HFileReaderV2 extends AbstractHFileReader { + dataBlockEncoder.getDataBlockEncoding() + ")"); } } - return cachedBlock; + return cachedBlock.unpack(hfileContext, fsBlockReader); } // Carry on, please load. } @@ -466,7 +468,7 @@ public class HFileReaderV2 extends AbstractHFileReader { HFile.dataBlockReadCnt.incrementAndGet(); } - return hfileBlock; + return hfileBlock.unpack(hfileContext, fsBlockReader); } } finally { traceScope.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java index 020a293..6770466 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java @@ -125,7 +125,8 @@ public class TestChecksum { assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); // read data back from the hfile, exclude header and checksum - ByteBuffer bb = b.getBufferWithoutHeader(); // read back data + HFileBlock unpacked = b.unpack(meta, hbr); + ByteBuffer bb = unpacked.getBufferWithoutHeader(); // read back data DataInputStream in = new DataInputStream( new ByteArrayInputStream( bb.array(), bb.arrayOffset(), bb.limit())); @@ -164,6 +165,7 @@ public class TestChecksum { b = hbr.readBlockData(0, -1, -1, pread); is.close(); b.sanityCheck(); + b = b.unpack(meta, hbr); assertEquals(4936, b.getUncompressedSizeWithoutHeader()); assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); @@ -274,12 +276,7 @@ public class TestChecksum { // validate data for (int i = 0; i < 1234; i++) { int val = in.readInt(); - if (val != i) { - String msg = "testChecksumCorruption: data mismatch at index " + - i + " expected " + i + " found " + val; - LOG.warn(msg); - assertEquals(i, val); - } + assertEquals("testChecksumCorruption: data mismatch at index " + i, i, val); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index 37456a8..e3d80aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -424,8 +424,12 @@ public class TestHFileBlock { ByteBuffer actualBuffer = b.getBufferWithoutHeader(); if (encoding != DataBlockEncoding.NONE) { // We expect a two-byte big-endian encoding id. 
- assertEquals(0, actualBuffer.get(0)); - assertEquals(encoding.getId(), actualBuffer.get(1)); + assertEquals( + "Unexpected first byte with " + buildMessageDetails(algo, encoding, pread), + Long.toHexString(0), Long.toHexString(actualBuffer.get(0))); + assertEquals( + "Unexpected second byte with " + buildMessageDetails(algo, encoding, pread), + Long.toHexString(encoding.getId()), Long.toHexString(actualBuffer.get(1))); actualBuffer.position(2); actualBuffer = actualBuffer.slice(); } @@ -442,6 +446,11 @@ public class TestHFileBlock { } } + static String buildMessageDetails(Algorithm compression, DataBlockEncoding encoding, + boolean pread) { + return String.format("compression %s, encoding %s, pread %s", compression, encoding, pread); + } + static void assertBuffersEqual(ByteBuffer expectedBuffer, ByteBuffer actualBuffer, Compression.Algorithm compression, DataBlockEncoding encoding, boolean pread) { @@ -454,9 +463,8 @@ public class TestHFileBlock { } fail(String.format( - "Content mismath for compression %s, encoding %s, " + - "pread %s, commonPrefix %d, expected %s, got %s", - compression, encoding, pread, prefix, + "Content mismatch for %s, commonPrefix %d, expected %s, got %s", + buildMessageDetails(compression, encoding, pread), prefix, nextBytesToStr(expectedBuffer, prefix), nextBytesToStr(actualBuffer, prefix))); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java index 88fdb77..e56c0cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java @@ -301,6 +301,7 @@ public class TestHFileBlockCompatibility { for (int blockId = 0; blockId < numBlocks; ++blockId) { b = hbr.readBlockData(pos, -1, -1, pread); b.sanityCheck(); + b = b.unpack(meta, hbr); pos += b.getOnDiskSizeWithHeader(); assertEquals((int) encodedSizes.get(blockId), @@ -335,7 +336,7 @@ public class TestHFileBlockCompatibility { * in this class but the code in HFileBlock.Writer will continually * evolve. */ - public static final class Writer extends HFileBlock.Writer{ + public static final class Writer extends HFileBlock.Writer { // These constants are as they were in minorVersion 0. 
private static final int HEADER_SIZE = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; @@ -416,10 +417,6 @@ public class TestHFileBlockCompatibility { private int unencodedDataSizeWritten; - /** - * @param compressionAlgorithm compression algorithm to use - * @param dataBlockEncoderAlgo data block encoding algorithm to use - */ public Writer(Compression.Algorithm compressionAlgorithm, HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS, boolean includesTag) { this(dataBlockEncoder, new HFileContextBuilder().withHBaseCheckSum(false) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java index 31546e2..c624d3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java @@ -95,11 +95,12 @@ public class TestHFileEncryption { return hbw.getOnDiskSizeWithHeader(); } - private long readAndVerifyBlock(long pos, HFileBlock.FSReaderV2 hbr, int size) + private long readAndVerifyBlock(long pos, HFileContext ctx, HFileBlock.FSReaderV2 hbr, int size) throws IOException { HFileBlock b = hbr.readBlockData(pos, -1, -1, false); assertEquals(0, HFile.getChecksumFailuresCount()); b.sanityCheck(); + b = b.unpack(ctx, hbr); LOG.info("Read a block at " + pos + " with" + " onDiskSizeWithHeader=" + b.getOnDiskSizeWithHeader() + " uncompressedSizeWithoutHeader=" + b.getOnDiskSizeWithoutHeader() + @@ -142,7 +143,7 @@ public class TestHFileEncryption { HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, totalSize, fileContext); long pos = 0; for (int i = 0; i < blocks; i++) { - pos += readAndVerifyBlock(pos, hbr, blockSizes[i]); + pos += readAndVerifyBlock(pos, fileContext, hbr, blockSizes[i]); } } finally { is.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java index 27e7051..bc9689c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java @@ -160,7 +160,7 @@ public class TestHFileWriterV2 { // Data index. We also read statistics about the block index written after // the root level. dataBlockIndexReader.readMultiLevelIndexRoot( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).unpack(meta, blockReader), trailer.getDataIndexCount()); if (findMidKey) { @@ -170,11 +170,12 @@ public class TestHFileWriterV2 { // Meta index. 
metaBlockIndexReader.readRootIndex( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(), - trailer.getMetaIndexCount()); + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX) + .unpack(meta, blockReader).getByteStream(), trailer.getMetaIndexCount()); // File info FileInfo fileInfo = new FileInfo(); - fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); + fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO) + .unpack(meta, blockReader).getByteStream()); byte [] keyValueFormatVersion = fileInfo.get( HFileWriterV2.KEY_VALUE_VERSION); boolean includeMemstoreTS = keyValueFormatVersion != null && @@ -189,7 +190,8 @@ public class TestHFileWriterV2 { fsdis.seek(0); long curBlockPos = 0; while (curBlockPos <= trailer.getLastDataBlockOffset()) { - HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false); + HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false) + .unpack(meta, blockReader); assertEquals(BlockType.DATA, block.getBlockType()); ByteBuffer buf = block.getBufferWithoutHeader(); while (buf.hasRemaining()) { @@ -232,7 +234,8 @@ public class TestHFileWriterV2 { while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) { LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset()); - HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false); + HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false) + .unpack(meta, blockReader); assertEquals(BlockType.META, block.getBlockType()); Text t = new Text(); ByteBuffer buf = block.getBufferWithoutHeader(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index a2abefe..4c54d12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -191,7 +191,7 @@ public class TestHFileWriterV3 { // Data index. We also read statistics about the block index written after // the root level. dataBlockIndexReader.readMultiLevelIndexRoot( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).unpack(context, blockReader), trailer.getDataIndexCount()); if (findMidKey) { @@ -201,11 +201,12 @@ public class TestHFileWriterV3 { // Meta index. 
metaBlockIndexReader.readRootIndex( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(), - trailer.getMetaIndexCount()); + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX) + .unpack(context, blockReader).getByteStream(), trailer.getMetaIndexCount()); // File info FileInfo fileInfo = new FileInfo(); - fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); + fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO) + .unpack(context, blockReader).getByteStream()); byte [] keyValueFormatVersion = fileInfo.get( HFileWriterV3.KEY_VALUE_VERSION); boolean includeMemstoreTS = keyValueFormatVersion != null && @@ -220,7 +221,8 @@ public class TestHFileWriterV3 { fsdis.seek(0); long curBlockPos = 0; while (curBlockPos <= trailer.getLastDataBlockOffset()) { - HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false); + HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false) + .unpack(context, blockReader); assertEquals(BlockType.DATA, block.getBlockType()); ByteBuffer buf = block.getBufferWithoutHeader(); int keyLen = -1; @@ -278,7 +280,8 @@ public class TestHFileWriterV3 { while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) { LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset()); - HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false); + HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false) + .unpack(context, blockReader); assertEquals(BlockType.META, block.getBlockType()); Text t = new Text(); ByteBuffer buf = block.getBufferWithoutHeader(); -- 1.9.0
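Usage sketch (illustrative, not part of the patch): after this change, blocks come off disk in their packed on-disk form and callers invoke unpack() only when the data is actually consumed, so the block cache can hold the smaller compressed/encrypted representation. The snippet below roughly mirrors the new call pattern followed in HFileReaderV2; the surrounding objects (fsBlockReader, hfileContext), the helper class name, and the same-package placement are assumptions made for the example, not code from the patch.

package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;
import java.nio.ByteBuffer;

// Illustrative helper only; sketches the lazy-decompression pattern introduced above.
// HFileBlock.FSReader and HFileBlock#unpack are package-private, hence the
// same-package placement assumed here.
final class LazyUnpackSketch {

  // Returns the plain (decompressed/decrypted) data section of one block.
  static ByteBuffer readPlainBlockData(HFileBlock.FSReader fsBlockReader,
      HFileContext hfileContext, long offset, long onDiskSizeWithHeader, boolean pread)
      throws IOException {
    // The block arrives in packed on-disk form; this is the form a reader may cache.
    HFileBlock packed = fsBlockReader.readBlockData(offset, onDiskSizeWithHeader, -1, pread);
    // Decompression/decryption is deferred until the data is actually needed.
    HFileBlock readable = packed.unpack(hfileContext, fsBlockReader);
    return readable.getBufferWithoutHeader();
  }
}

If the file is written without compression or encryption, unpack() is a no-op that returns the same block, so uncompressed files pay no extra copy on this path.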