diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 72f144df..9a7b08c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -23,7 +23,6 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
-import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -112,6 +111,7 @@
 import com.google.common.base.Preconditions;
 public class HFileBlock implements Cacheable {
   private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 
+  // Block header fields.
   /** Type of block. Header field 0. */
   private BlockType blockType;
@@ -139,7 +139,7 @@ public class HFileBlock implements Cacheable {
    * @see Writer#putHeader(byte[], int, int, int, int)
    */
   private int onDiskDataSizeWithHeader;
-
+  // End of Block Header Fields.
 
   /**
    * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
@@ -153,7 +153,7 @@ public class HFileBlock implements Cacheable {
    * We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have
    * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache.
    * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be
-   * good if could be confined to cache-use only but hard-to-do.
+   * good if could be confined to cache-use only but hard-to-do. TODO.
    */
   private ByteBuff buf;
@@ -167,20 +167,10 @@ public class HFileBlock implements Cacheable {
    */
   private long offset = UNSET;
 
-  private MemoryType memType = MemoryType.EXCLUSIVE;
-
   /**
-   * The on-disk size of the next block, including the header and checksums if present, obtained by
-   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-   * header, or UNSET if unknown.
-   *
-   * Blocks try to carry the size of the next block to read in this data member. They will even have
-   * this value when served from cache. Could save a seek in the case where we are iterating through
-   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-   * will save us doing a seek to read the header so we can read the body of a block.
-   * TODO: see how effective this is at saving seeks.
+   * Whether this file block is exclusive owner of the memory it occupies.
    */
-  private int nextBlockOnDiskSize = UNSET;
+  private MemoryType memType = MemoryType.EXCLUSIVE;
 
   /**
    * On a checksum failure, do these many succeeding read requests using hdfs checksums before
@@ -189,8 +179,6 @@
   static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3;
 
   private static int UNSET = -1;
-  public static final boolean FILL_HEADER = true;
-  public static final boolean DONT_FILL_HEADER = false;
 
   // How to get the estimate correctly? if it is a singleBB?
   public static final int MULTI_BYTE_BUFFER_HEAP_SIZE =
@@ -198,12 +186,12 @@
   /**
    * Space for metadata on a block that gets stored along with the block when we cache it.
-   * There are a few bytes stuck on the end of the HFileBlock that we pull in from HDFS (note,
-   * when we read from HDFS, we pull in an HFileBlock AND the header of the next block if one).
+   * There are a few bytes stuck on the end of the HFileBlock that we pull in from HDFS.
    * 8 bytes are offset of this block (long) in the file. Offset is important because
    * used when we remake the CacheKey when we return the block to cache when done. There is also
    * a flag on whether checksumming is being done by hbase or not. See class comment for note on
    * uncertain state of checksumming of blocks that come out of cache (should we or should we not?).
+   * TODO: !!! NEEDED?
    * Finally there 4 bytes to hold the length of the next block which can save a seek on occasion.
    *
    * This EXTRA came in with original commit of the bucketcache, HBASE-7404. Was formerly
    * known as EXTRA_SERIALIZATION_SPACE.
@@ -253,9 +241,8 @@ public class HFileBlock implements Cacheable {
       buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
       boolean usesChecksum = buf.get() == (byte)1;
       long offset = buf.getLong();
-      int nextBlockOnDiskSize = buf.getInt();
-      HFileBlock hFileBlock =
-          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
+      // TODO: AN INT MORE TO READ HERE!!
+      HFileBlock hFileBlock = new HFileBlock(newByteBuff, usesChecksum, memType, offset, null);
       return hFileBlock;
     }
 
@@ -308,7 +295,7 @@
    * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
    * param.
    */
-  private HFileBlock(HFileBlock that,boolean bufCopy) {
+  private HFileBlock(HFileBlock that, boolean bufCopy) {
     this.blockType = that.blockType;
     this.onDiskSizeWithoutHeader = that.onDiskSizeWithoutHeader;
     this.uncompressedSizeWithoutHeader = that.uncompressedSizeWithoutHeader;
@@ -321,7 +308,6 @@
     this.offset = that.offset;
     this.onDiskDataSizeWithHeader = that.onDiskDataSizeWithHeader;
     this.fileContext = that.fileContext;
-    this.nextBlockOnDiskSize = that.nextBlockOnDiskSize;
   }
 
   /**
@@ -340,20 +326,16 @@
    * @param prevBlockOffset see {@link #prevBlockOffset}
    * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) followed by
    *          uncompressed data.
-   * @param fillHeader when true, write the first 4 header fields into passed buffer.
    * @param offset the file offset the block was read from
    * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
    * @param fileContext HFile meta data
    */
   HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
-      long prevBlockOffset, ByteBuffer b, boolean fillHeader, long offset,
-      final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext) {
+      long prevBlockOffset, ByteBuffer b, long offset,
+      int onDiskDataSizeWithHeader, HFileContext fileContext) {
     init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-        prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
+        prevBlockOffset, offset, onDiskDataSizeWithHeader, fileContext);
     this.buf = new SingleByteBuff(b);
-    if (fillHeader) {
-      overwriteHeader();
-    }
     this.buf.rewind();
   }
 
@@ -365,7 +347,7 @@
    * @param buf Has header, content, and trailing checksums if present.
    */
   HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, MemoryType memType, final long offset,
-      final int nextBlockOnDiskSize, HFileContext fileContext) throws IOException {
+      HFileContext fileContext) throws IOException {
     buf.rewind();
     final BlockType blockType = BlockType.read(buf);
     final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
@@ -394,7 +376,7 @@
     fileContext = fileContextBuilder.build();
     assert usesHBaseChecksum == fileContext.isUseHBaseChecksum();
     init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-        prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
+        prevBlockOffset, offset, onDiskDataSizeWithHeader, fileContext);
     this.memType = memType;
     this.offset = offset;
     this.buf = buf;
@@ -406,36 +388,25 @@
    */
   private void init(BlockType blockType, int onDiskSizeWithoutHeader,
       int uncompressedSizeWithoutHeader, long prevBlockOffset,
-      long offset, int onDiskDataSizeWithHeader, final int nextBlockOnDiskSize,
-      HFileContext fileContext) {
+      long offset, int onDiskDataSizeWithHeader, HFileContext fileContext) {
     this.blockType = blockType;
     this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
     this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
     this.prevBlockOffset = prevBlockOffset;
     this.offset = offset;
     this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
-    this.nextBlockOnDiskSize = nextBlockOnDiskSize;
     this.fileContext = fileContext;
   }
 
   /**
    * Parse total ondisk size including header and checksum.
-   * @param headerBuf Header ByteBuffer. Presumed exact size of header.
+   * @param hdr block header as a byte array. Presumed exact size of header.
    * @param verifyChecksum true if checksum verification is in use.
    * @return Size of the block with header included.
    */
-  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf, boolean verifyChecksum) {
-    return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
-      headerSize(verifyChecksum);
-  }
-
-  /**
-   * @return the on-disk size of the next block (including the header size and any checksums if
-   *         present) read by peeking into the next block's header; use as a hint when doing
-   *         a read of the next block when scanning or running over a file.
-   */
-  public int getNextBlockOnDiskSize() {
-    return nextBlockOnDiskSize;
+  private static int getOnDiskSizeWithHeader(final byte [] hdr, boolean verifyChecksum) {
+    return Bytes.toInt(hdr, Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
+        headerSize(verifyChecksum);
   }
 
   public BlockType getBlockType() {
@@ -443,7 +413,7 @@
   }
 
   /** @return get data block encoding id that was used to encode this block */
-  public short getDataBlockEncodingId() {
+  short getDataBlockEncodingId() {
     if (blockType != BlockType.ENCODED_DATA) {
       throw new IllegalArgumentException("Querying encoder ID of a block " +
           "of type other than " + BlockType.ENCODED_DATA + ": " + blockType);
@@ -481,23 +451,6 @@
   }
 
   /**
-   * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position
-   * is modified as side-effect.
-   */
-  private void overwriteHeader() {
-    buf.rewind();
-    blockType.write(buf);
-    buf.putInt(onDiskSizeWithoutHeader);
-    buf.putInt(uncompressedSizeWithoutHeader);
-    buf.putLong(prevBlockOffset);
-    if (this.fileContext.isUseHBaseChecksum()) {
-      buf.put(fileContext.getChecksumType().getCode());
-      buf.putInt(fileContext.getBytesPerChecksum());
-      buf.putInt(onDiskDataSizeWithHeader);
-    }
-  }
-
-  /**
    * Returns a buffer that does not include the header or checksum.
    *
    * @return the buffer with header skipped and checksum omitted.
@@ -687,7 +640,8 @@
   }
 
   /** An additional sanity-check in case no compression or encryption is being used. */
-  public void sanityCheckUncompressedSize() throws IOException {
+  @VisibleForTesting
+  void sanityCheckUncompressedSize() throws IOException {
     if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) {
       throw new IOException("Using no compression but "
           + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
@@ -777,11 +731,9 @@
   }
 
   /**
-   * Read from an input stream at least necessaryLen and if possible,
-   * extraLen also if available. Analogous to
+   * Read from an input stream. Analogous to
    * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses
-   * positional read and specifies a number of "extra" bytes that would be
-   * desirable but not absolutely necessary to read.
+   * positional read.
    *
    * @param in the input stream to read from
    * @param position the position within the stream from which to start reading
@@ -789,31 +741,26 @@
    * @param bufOffset the destination offset in the buffer
    * @param necessaryLen the number of bytes that are absolutely necessary to
    *          read
-   * @param extraLen the number of extra bytes that would be nice to read
-   * @return true if and only if extraLen is > 0 and reading those extra bytes
-   *         was successful
    * @throws IOException if failed to read the necessary bytes
    */
   @VisibleForTesting
-  static boolean positionalReadWithExtra(FSDataInputStream in,
-      long position, byte[] buf, int bufOffset, int necessaryLen, int extraLen)
-      throws IOException {
-    int bytesRemaining = necessaryLen + extraLen;
+  static void positionalReadWithExtra(FSDataInputStream in, long position, byte[] buf,
+      int bufOffset, int necessaryLen)
+      throws IOException {
+    int bytesRemaining = necessaryLen;
     int bytesRead = 0;
     while (bytesRead < necessaryLen) {
      int ret = in.read(position, buf, bufOffset, bytesRemaining);
      if (ret < 0) {
        throw new IOException("Premature EOF from inputStream (positional read "
            + "returned " + ret + ", was trying to read " + necessaryLen
-            + " necessary bytes and " + extraLen + " extra bytes, "
-            + "successfully read " + bytesRead);
+            + " necessary bytes, successfully read " + bytesRead);
      }
      position += ret;
      bufOffset += ret;
      bytesRemaining -= ret;
      bytesRead += ret;
    }
-    return bytesRead != necessaryLen && bytesRemaining <= 0;
   }
 
   /**
@@ -1291,7 +1237,7 @@
           cacheConf.shouldCacheCompressed(blockType.getCategory())?
             getOnDiskBufferWithHeader() :
             getUncompressedBufferWithHeader(),
-          FILL_HEADER, startOffset, UNSET,
+          startOffset,
           onDiskBlockBytesWithHeader.length + onDiskChecksum.length, newContext);
     }
   }
@@ -1367,27 +1313,6 @@
   }
 
   /**
-   * Data-structure to use caching the header of the NEXT block. Only works if next read
-   * that comes in here is next in sequence in this block.
-   *
-   * When we read, we read current block and the next blocks' header. We do this so we have
-   * the length of the next block to read if the hfile index is not available (rare).
-   * TODO: Review!! This trick of reading next blocks header is a pain, complicates our
-   * read path and I don't think it needed given it rare we don't have the block index
-   * (it is 'normally' present, gotten from the hfile index). FIX!!!
-   */
-  private static class PrefetchedHeader {
-    long offset = -1;
-    byte [] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
-    final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
-
-    @Override
-    public String toString() {
-      return "offset=" + this.offset + ", header=" + Bytes.toStringBinary(header);
-    }
-  }
-
-  /**
    * Reads version 2 blocks from the filesystem.
    */
   static class FSReaderImpl implements FSReader {
@@ -1400,15 +1325,6 @@
     /** Default context used when BlockType != {@link BlockType#ENCODED_DATA}. */
     private final HFileBlockDefaultDecodingContext defaultDecodingCtx;
 
-    /**
-     * Cache of the NEXT header after this. Check it is indeed next blocks header
-     * before using it. TODO: Review. This overread into next block to fetch
-     * next blocks header seems unnecessary given we usually get the block size
-     * from the hfile index. Review!
-     */
-    private AtomicReference<PrefetchedHeader> prefetchedHeader =
-        new AtomicReference<PrefetchedHeader>(new PrefetchedHeader());
-
     /** The size of the file we are reading from, or -1 if unknown. */
     protected long fileSize;
 
@@ -1453,21 +1369,20 @@
       this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext);
     }
 
+    // TODO: TAKE INDEX!!!!
     public BlockIterator blockRange(final long startOffset, final long endOffset) {
       final FSReader owner = this; // handle for inner class
       return new BlockIterator() {
         private long offset = startOffset;
-        // Cache length of next block. Current block has the length of next block in it.
-        private long length = -1;
 
         @Override
         public HFileBlock nextBlock() throws IOException {
           if (offset >= endOffset) {
            return null;
          }
-          HFileBlock b = readBlockData(offset, length, false);
+          // TODO: GET FROM INDEX!!!!
+          HFileBlock b = readBlockData(offset, -1, false);
           offset += b.getOnDiskSizeWithHeader();
-          length = b.getNextBlockOnDiskSize();
           return b.unpack(fileContext, owner);
         }
 
@@ -1491,59 +1406,30 @@
      * @param dest destination buffer
      * @param destOffset offset into the destination buffer at where to put the bytes we read
      * @param size size of read
-     * @param peekIntoNextBlock whether to read the next block's on-disk size
     * @param fileOffset position in the stream to read at
     * @param pread whether we should do a positional read
     * @param istream The input source of data
-     * @return the on-disk size of the next block with header size included, or
-     *         -1 if it could not be determined; if not -1, the dest INCLUDES the
-     *         next header
     * @throws IOException
     */
-    protected int readAtOffset(FSDataInputStream istream, byte [] dest, int destOffset, int size,
-        boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException {
-      if (peekIntoNextBlock && destOffset + size + hdrSize > dest.length) {
-        // We are asked to read the next block's header as well, but there is
-        // not enough room in the array.
- throw new IOException("Attempted to read " + size + " bytes and " + - hdrSize + " bytes of next header into a " + dest.length + - "-byte array at offset " + destOffset); - } - + protected void readAtOffset(FSDataInputStream istream, byte [] dest, int destOffset, int size, + long fileOffset, boolean pread) + throws IOException { if (!pread && streamLock.tryLock()) { // Seek + read. Better for scanning. try { istream.seek(fileOffset); - long realOffset = istream.getPos(); if (realOffset != fileOffset) { - throw new IOException("Tried to seek to " + fileOffset + " to " - + "read " + size + " bytes, but pos=" + realOffset - + " after seek"); - } - - if (!peekIntoNextBlock) { - IOUtils.readFully(istream, dest, destOffset, size); - return -1; - } - - // Try to read the next block header. - if (!readWithExtra(istream, dest, destOffset, size, hdrSize)) { - return -1; + throw new IOException("Tried to seek to " + fileOffset + " to read " + size + + " bytes, but pos=" + realOffset + " after seek"); } + IOUtils.readFully(istream, dest, destOffset, size); } finally { streamLock.unlock(); } } else { - // Positional read. Better for random reads; or when the streamLock is already locked. - int extraSize = peekIntoNextBlock ? hdrSize : 0; - if (!positionalReadWithExtra(istream, fileOffset, dest, destOffset, size, extraSize)) { - return -1; - } + positionalReadWithExtra(istream, fileOffset, dest, destOffset, size); } - - assert peekIntoNextBlock; - return Bytes.toInt(dest, destOffset + size + BlockType.MAGIC_LENGTH) + hdrSize; } /** @@ -1642,7 +1528,7 @@ public class HFileBlock implements Cacheable { * is not right. * @throws IOException */ - private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuffer headerBuf, + private void verifyOnDiskSizeMatchesHeader(final int passedIn, final byte [] headerBuf, final long offset, boolean verifyChecksum) throws IOException { // Assert size provided aligns with what is in the header @@ -1654,34 +1540,6 @@ public class HFileBlock implements Cacheable { } /** - * Check atomic reference cache for this block's header. Cache only good if next - * read coming through is next in sequence in the block. We read next block's - * header on the tail of reading the previous block to save a seek. Otherwise, - * we have to do a seek to read the header before we can pull in the block OR - * we have to backup the stream because we over-read (the next block's header). - * @see PrefetchedHeader - * @return The cached block header or null if not found. - * @see #cacheNextBlockHeader(long, byte[], int, int) - */ - private ByteBuffer getCachedHeader(final long offset) { - PrefetchedHeader ph = this.prefetchedHeader.get(); - return ph != null && ph.offset == offset? ph.buf: null; - } - - /** - * Save away the next blocks header in atomic reference. - * @see #getCachedHeader(long) - * @see PrefetchedHeader - */ - private void cacheNextBlockHeader(final long offset, - final byte [] header, final int headerOffset, final int headerLength) { - PrefetchedHeader ph = new PrefetchedHeader(); - ph.offset = offset; - System.arraycopy(header, headerOffset, ph.header, 0, headerLength); - this.prefetchedHeader.set(ph); - } - - /** * Reads a version 2 block. * * @param offset the offset in the stream to read at. Usually the @@ -1703,56 +1561,39 @@ public class HFileBlock implements Cacheable { + "block (onDiskSize=" + onDiskSizeWithHeaderL + ")"); } int onDiskSizeWithHeader = checkAndGetSizeAsInt(onDiskSizeWithHeaderL, hdrSize); - // Try and get cached header. 
-      // and will save us having to seek the stream backwards to reread the header we
-      // read the last time through here.
-      ByteBuffer headerBuf = getCachedHeader(offset);
       if (LOG.isTraceEnabled()) {
         LOG.trace("Reading " + this.fileContext.getHFileName() + " at offset=" + offset +
-          ", pread=" + pread + ", verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-          headerBuf + ", onDiskSizeWithHeader=" + onDiskSizeWithHeader);
+          ", pread=" + pread + ", verifyChecksum=" + verifyChecksum +
+          ", onDiskSizeWithHeader=" + onDiskSizeWithHeader);
       }
+      boolean chksum = this.fileContext.isUseHBaseChecksum();
+      byte [] hdr = null;
       if (onDiskSizeWithHeader <= 0) {
-        // We were not passed the block size. Need to get it from the header. If header was not in
-        // cache, need to seek to pull it in. This is costly and should happen very rarely.
-        // Currently happens on open of a hfile reader where we read the trailer blocks for
-        // indices. Otherwise, we are reading block sizes out of the hfile index. To check,
-        // enable TRACE in this file and you'll get an exception in a LOG every time we seek.
-        // See HBASE-17072 for more detail.
-        if (headerBuf == null) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("Extra see to get block size!", new RuntimeException());
-          }
-          headerBuf = ByteBuffer.allocate(hdrSize);
-          readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-              offset, pread);
+        // We were not passed the block size. Need to get it from header. This is costly and
+        // should happen very rarely. Currently happens on open of a hfile reader where we
+        // read the trailer blocks for indices. Otherwise, we are reading block sizes out of
+        // the hfile index. To check, enable TRACE in this file and you'll get an exception
+        // in a LOG every time we seek. See HBASE-17072 for more detail.
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Extra seek to get block size!", new RuntimeException());
        }
-        onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf,
-            this.fileContext.isUseHBaseChecksum());
+        hdr = new byte [hdrSize];
+        readAtOffset(is, hdr, 0, hdr.length, offset, pread);
+        onDiskSizeWithHeader = getOnDiskSizeWithHeader(hdr, chksum);
       }
-      int preReadHeaderSize = headerBuf == null? 0 : hdrSize;
-      // Allocate enough space to fit the next block's header too; saves a seek next time through.
-      // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header;
-      // onDiskSizeWithHeader is header, body, and any checksums if present. preReadHeaderSize
-      // says where to start reading. If we have the header cached, then we don't need to read
-      // it again and we can likely read from last place we left off w/o need to backup and reread
-      // the header we read last time through here. TODO: Review this overread of the header. Is it necessary
-      // when we get the block size from the hfile index? See note on PrefetchedHeader class above.
-      // TODO: Make this ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-      byte [] onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize];
-      int nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, preReadHeaderSize,
-          onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-      if (headerBuf != null) {
-        // The header has been read when reading the previous block OR in a distinct header-only
-        // read. Copy to this block's header.
-        System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
-      } else {
-        headerBuf = ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
+      // onDiskSizeWithHeader is header, body, and any checksums if present.
+      // If we read hdr above, then don't reread it (hdr == null check).
+      // TODO: Make ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
+      int preReadHeaderSize = hdr == null? 0 : hdrSize;
+      byte [] onDiskBlock = new byte[onDiskSizeWithHeader];
+      readAtOffset(is, onDiskBlock, preReadHeaderSize, onDiskSizeWithHeader - preReadHeaderSize,
+          offset + preReadHeaderSize, pread);
+      if (hdr != null) {
+        // We read hdr earlier; copy to top of the block.
+        System.arraycopy(hdr, 0, onDiskBlock, 0, hdrSize);
       }
       // Do a few checks before we go instantiate HFileBlock.
       assert onDiskSizeWithHeader > this.hdrSize;
-      verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset,
-          this.fileContext.isUseHBaseChecksum());
+      verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, onDiskBlock, offset, chksum);
       ByteBuffer onDiskBlockByteBuffer = ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
       // Verify checksum of the data before using it for building HFileBlock.
       if (verifyChecksum &&
@@ -1762,10 +1603,6 @@
       // The onDiskBlock will become the headerAndDataBuffer for this block.
-      // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-      // contains the header of next block, so no need to set next block's header in it.
-      HFileBlock hFileBlock =
-        new HFileBlock(new SingleByteBuff(onDiskBlockByteBuffer),
-          this.fileContext.isUseHBaseChecksum(), MemoryType.EXCLUSIVE, offset,
-          nextBlockOnDiskSize, fileContext);
+      HFileBlock hFileBlock = new HFileBlock(new SingleByteBuff(onDiskBlockByteBuffer),
+          chksum, MemoryType.EXCLUSIVE, offset, this.fileContext);
       // Run check on uncompressed sizings.
       if (!fileContext.isCompressedOrEncrypted()) {
         hFileBlock.sanityCheckUncompressed();
@@ -1773,11 +1612,6 @@
       if (LOG.isTraceEnabled()) {
         LOG.trace("Read " + hFileBlock);
       }
-      // Cache next block header if we read it for the next time through here.
-      if (nextBlockOnDiskSize != -1) {
-        cacheNextBlockHeader(offset + hFileBlock.getOnDiskSizeWithHeader(),
-          onDiskBlock, onDiskSizeWithHeader, hdrSize);
-      }
       return hFileBlock;
     }
 
@@ -1880,9 +1714,9 @@
    * @return The passed destination with metadata added.
    */
   private ByteBuffer addMetaData(final ByteBuffer destination) {
-    destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
+    destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0); // TODO: !!! NEED?
     destination.putLong(this.offset);
-    destination.putInt(this.nextBlockOnDiskSize);
+    destination.putInt(-1); // TODO!!!!! REMOVE!!!!!!
     return destination;
   }
 
@@ -1896,7 +1730,6 @@
   public int hashCode() {
     int result = 1;
     result = result * 31 + blockType.hashCode();
-    result = result * 31 + nextBlockOnDiskSize;
     result = result * 31 + (int) (offset ^ (offset >>> 32));
     result = result * 31 + onDiskSizeWithoutHeader;
     result = result * 31 + (int) (prevBlockOffset ^ (prevBlockOffset >>> 32));
@@ -1922,9 +1755,6 @@
     if (castedComparison.blockType != this.blockType) {
       return false;
     }
-    if (castedComparison.nextBlockOnDiskSize != this.nextBlockOnDiskSize) {
-      return false;
-    }
     // Offset is important. Needed when we have to remake cachekey when block is returned to cache.
     if (castedComparison.offset != this.offset) {
       return false;