 .../apache/hadoop/hbase/io/hfile/BlockType.java    |  30 +++++
 .../hbase/util/AbstractPositionedByteRange.java    |   3 +
 .../hadoop/hbase/util/PositionedByteRange.java     |   7 +
 .../hbase/util/SimplePositionedByteRange.java      |   5 +
 .../util/SimplePositionedMutableByteRange.java     |   8 ++
 .../apache/hadoop/hbase/io/hfile/Cacheable.java    |   2 +
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   | 141 +++++++++++++--------
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |   2 +-
 .../hadoop/hbase/io/hfile/CacheTestUtils.java      |  18 ++-
 .../hadoop/hbase/io/hfile/TestCacheConfig.java     |   7 +
 .../hbase/io/hfile/TestCachedBlockQueue.java       |   8 ++
 .../hadoop/hbase/io/hfile/TestHFileBlock.java      |   6 +-
 .../io/hfile/TestHFileBlockCompatibility.java      |   9 +-
 .../hbase/io/hfile/TestHFileDataBlockEncoder.java  |  19 +--
 .../hadoop/hbase/io/hfile/TestLruBlockCache.java   |   7 +
 15 files changed, 202 insertions(+), 70 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java
index 135d25c..2ee480f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
 
 /**
  * Various types of HFile blocks. Ordinal values of these enum constants must not be relied upon.
@@ -216,4 +217,33 @@ public enum BlockType {
     return this == DATA || this == ENCODED_DATA;
   }
 
+  public static BlockType read(PositionedByteRange buf) throws IOException {
+    BlockType blockType = parse(buf, buf.getOffset() + buf.getPosition(),
+        Math.min(buf.getLimit() - buf.getPosition(), MAGIC_LENGTH));
+    buf.setPosition(buf.getPosition() + MAGIC_LENGTH);
+    return blockType;
+  }
+
+  public static BlockType parse(PositionedByteRange buf, int offset, int length) throws IOException {
+    if (length != MAGIC_LENGTH) {
+      throw new IOException("Magic record of invalid length: "
+          + Bytes.toStringBinary(buf.getBytes(), offset, length));
+    }
+
+    for (BlockType blockType : values()) {
+      if (Bytes.compareTo(blockType.magic, 0, MAGIC_LENGTH, buf.getBytes(), offset,
+          MAGIC_LENGTH) == 0) {
+        return blockType;
+      }
+    }
+
+    throw new IOException("Invalid HFile block magic: "
+        + Bytes.toStringBinary(buf.getBytes(), offset, MAGIC_LENGTH));
+  }
+
+  public void write(PositionedByteRange buf) {
+    buf.put(0, magic);
+    buf.setPosition(magic.length + buf.getPosition());
+  }
+
 }
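For reviewers, the new PositionedByteRange-based magic I/O round-trips like this. A minimal sketch, assuming only the read()/write() methods added above and the existing 8-byte MAGIC_LENGTH:

    import org.apache.hadoop.hbase.io.hfile.BlockType;
    import org.apache.hadoop.hbase.util.PositionedByteRange;
    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    public class BlockTypeMagicRoundTrip {
      public static void main(String[] args) throws Exception {
        // Room for one MAGIC_LENGTH (8-byte) magic record.
        PositionedByteRange buf = new SimplePositionedMutableByteRange(8);
        BlockType.DATA.write(buf);         // stores the magic, advances position by 8
        buf.setPosition(0);                // the PBR equivalent of ByteBuffer.rewind()
        BlockType t = BlockType.read(buf); // parses the magic, advances position by 8
        assert t == BlockType.DATA;
      }
    }

One thing worth flagging: write() stores the magic at index 0 of the range rather than at the current position, while still advancing the position; the two coincide for the header-at-start call in overwriteHeader(), but not for a general append.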
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 26bcf97..04043c7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -149,6 +149,9 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
   public abstract PositionedByteRange put(byte val);
 
   @Override
+  public abstract PositionedByteRange put(PositionedByteRange pbr);
+
+  @Override
   public abstract PositionedByteRange put(byte[] val);
 
   @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
index 0d31154..aab8ed9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
@@ -117,6 +117,13 @@ public interface PositionedByteRange extends ByteRange {
    * @return this.
    */
   public PositionedByteRange put(byte val);
+
+  /**
+   * Store the content of {@code pbr} in this range.
+   * @param pbr the positioned byte range to store
+   * @return this
+   */
+  public PositionedByteRange put(PositionedByteRange pbr);
 
   /**
    * Store short {@code val} at the next position in this range.
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedByteRange.java
index 2f7e5c3..9d8031a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedByteRange.java
@@ -125,6 +125,11 @@ public class SimplePositionedByteRange extends AbstractPositionedByteRange {
   }
 
   @Override
+  public PositionedByteRange put(PositionedByteRange pbr) {
+    throw new ReadOnlyByteRangeException();
+  }
+
+  @Override
   public PositionedByteRange put(byte[] val, int offset, int length) {
     throw new ReadOnlyByteRangeException();
   }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
index cb4ae86..7962513 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
@@ -149,6 +149,14 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
   }
 
   @Override
+  public PositionedByteRange put(PositionedByteRange pbr) {
+    for (int i = 0; i < pbr.getLimit(); i++) {
+      put(pbr.get(i));
+    }
+    return this;
+  }
+
+  @Override
   public PositionedByteRange put(byte[] val) {
     if (0 == val.length) return this;
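The mutable implementation above copies the source byte-by-byte from index 0 through getLimit(), regardless of the source's current position, while advancing this range's position. A small sketch of that contract, using only methods from this patch and the existing PBR API:

    import org.apache.hadoop.hbase.util.PositionedByteRange;
    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    public class PutRangeExample {
      public static void main(String[] args) {
        PositionedByteRange src = new SimplePositionedMutableByteRange(new byte[] { 1, 2, 3 });
        PositionedByteRange dst = new SimplePositionedMutableByteRange(8);
        dst.put((byte) 0); // dst position: 1
        dst.put(src);      // appends src[0..limit); dst position: 4
        assert dst.getPosition() == 4;
        assert dst.getBytes()[1] == 1 && dst.getBytes()[3] == 3;
      }
    }

Copying [0, limit) rather than [position, limit) is worth a review note, since it differs from ByteBuffer.put(ByteBuffer), which transfers only the source's remaining bytes.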
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
index 903e6d7..ae5502a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
 
 /**
  * Cacheable is an interface that allows for an object to be cached. If using an
@@ -49,6 +50,7 @@
    */
   void serialize(ByteBuffer destination);
 
+  void serialize(PositionedByteRange destination);
 
   /**
    * Returns CacheableDeserializer instance which reconstructs original object from ByteBuffer.
    *
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 3e26107..31a4ae8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -49,6 +49,9 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CompoundBloomFilter;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
+import org.apache.hadoop.hbase.util.SimplePositionedByteRange;
+import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;
@@ -121,10 +124,14 @@ public class HFileBlock implements Cacheable {
 
   private static final CacheableDeserializer<Cacheable> blockDeserializer =
       new CacheableDeserializer<Cacheable>() {
+        // This should directly deal with PBR
        public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws IOException {
          buf.limit(buf.limit() - HFileBlock.EXTRA_SERIALIZATION_SPACE).rewind();
          ByteBuffer newByteBuffer;
          if (reuse) {
+           // TODO: We need an equivalent of ByteBuffer.slice() in PBR, since
+           // we are resetting the position to 0 here; using
+           // PBR.shallowCopySubRange should be fine?
            newByteBuffer = buf.slice();
          } else {
            newByteBuffer = ByteBuffer.allocate(buf.limit());
@@ -133,7 +140,8 @@ public class HFileBlock implements Cacheable {
          buf.position(buf.limit());
          buf.limit(buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE);
          boolean usesChecksum = buf.get() == (byte) 1;
-         HFileBlock ourBuffer = new HFileBlock(newByteBuffer, usesChecksum);
+         HFileBlock ourBuffer = new HFileBlock(new SimplePositionedByteRange(
+             newByteBuffer.array()), usesChecksum);
          ourBuffer.offset = buf.getLong();
          ourBuffer.nextBlockOnDiskSizeWithHeader = buf.getInt();
          return ourBuffer;
@@ -170,7 +178,7 @@
   private final int onDiskDataSizeWithHeader;
 
   /** The in-memory representation of the hfile block */
-  private ByteBuffer buf;
+  private PositionedByteRange buf;
 
   /** Meta data that holds meta information on the hfileblock **/
   private HFileContext fileContext;
@@ -212,7 +220,7 @@
    * @param fileContext HFile meta data
    */
   HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
-      int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer buf,
+      int uncompressedSizeWithoutHeader, long prevBlockOffset, PositionedByteRange buf,
       boolean fillHeader, long offset, int onDiskDataSizeWithHeader,
       HFileContext fileContext) {
     this.blockType = blockType;
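On the slice() TODO in the deserializer above: shallowCopySubRange yields a sub-range that shares the backing array, much as ByteBuffer.slice() shares storage. A sketch under that assumption, using the shallowCopySubRange(int, int) method already in the ByteRange API:

    import org.apache.hadoop.hbase.util.ByteRange;
    import org.apache.hadoop.hbase.util.PositionedByteRange;
    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    public class SliceEquivalent {
      public static void main(String[] args) {
        PositionedByteRange whole = new SimplePositionedMutableByteRange(new byte[16]);
        whole.setPosition(4);
        // "Slice" from the current position to the limit; indices rebase to 0.
        ByteRange slice = whole.shallowCopySubRange(whole.getPosition(), whole.getRemaining());
        slice.put(0, (byte) 42); // writes through to whole.getBytes()[4]
        assert whole.getBytes()[4] == 42;
      }
    }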
@@ -235,8 +243,9 @@
    * because majorNumbers indicate the format of a HFile whereas minorNumbers
    * indicate the format inside a HFileBlock.
    */
-  HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
-    b.rewind();
+  HFileBlock(PositionedByteRange b, boolean usesHBaseChecksum) throws IOException {
+    b.setPosition(0);
+    b.setOffset(0);
     blockType = BlockType.read(b);
     onDiskSizeWithoutHeader = b.getInt();
     uncompressedSizeWithoutHeader = b.getInt();
@@ -255,7 +264,8 @@
     }
     this.fileContext = contextBuilder.build();
     buf = b;
-    buf.rewind();
+    buf.setPosition(0);
+    buf.setOffset(0);
   }
 
   public BlockType getBlockType() {
@@ -312,7 +322,8 @@
    * buffer. Resets the buffer position to the end of header as side effect.
    */
   private void overwriteHeader() {
-    buf.rewind();
+    buf.setPosition(0);
+    buf.setOffset(0);
     blockType.write(buf);
     buf.putInt(onDiskSizeWithoutHeader);
     buf.putInt(uncompressedSizeWithoutHeader);
@@ -327,8 +338,8 @@
    * @return the buffer with header skipped
    */
   public ByteBuffer getBufferWithoutHeader() {
-    return ByteBuffer.wrap(buf.array(), buf.arrayOffset() + headerSize(),
-        buf.limit() - headerSize() - totalChecksumBytes()).slice();
+    return ByteBuffer.wrap(buf.getBytes(), buf.getOffset() + headerSize(),
+        buf.getLimit() - headerSize() - totalChecksumBytes()).slice();
   }
 
   /**
@@ -341,8 +352,8 @@
    * @return the buffer of this block for read-only operations
    */
   public ByteBuffer getBufferReadOnly() {
-    return ByteBuffer.wrap(buf.array(), buf.arrayOffset(),
-        buf.limit() - totalChecksumBytes()).slice();
+    return ByteBuffer.wrap(buf.getBytes(), buf.getOffset(),
+        buf.getLimit() - totalChecksumBytes()).slice();
   }
 
   /**
@@ -353,7 +364,7 @@
    * @return the byte buffer with header included for read-only operations
    */
   public ByteBuffer getBufferReadOnlyWithHeader() {
-    return ByteBuffer.wrap(buf.array(), buf.arrayOffset(), buf.limit()).slice();
+    return ByteBuffer.wrap(buf.getBytes(), buf.getOffset(), buf.getLimit()).slice();
   }
 
   /**
@@ -363,7 +374,7 @@
    * @return the byte buffer with header included
    */
   ByteBuffer getBufferWithHeader() {
-    ByteBuffer dupBuf = buf.duplicate();
+    ByteBuffer dupBuf = ByteBuffer.wrap(buf.shallowCopy().getBytes(), 0, buf.getLimit());
     dupBuf.rewind();
     return dupBuf;
   }
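The accessors above all use one pattern: wrap the range's backing array in a ByteBuffer and slice() it, giving callers a zero-copy view. The pattern in isolation (the 33-byte header and 4 checksum bytes are illustrative values, not taken from this patch):

    import java.nio.ByteBuffer;

    import org.apache.hadoop.hbase.util.PositionedByteRange;
    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    public class RangeToBufferView {
      // Shape of getBufferWithoutHeader() for arbitrary header/checksum sizes.
      static ByteBuffer dataView(PositionedByteRange buf, int headerSize, int checksumBytes) {
        return ByteBuffer.wrap(buf.getBytes(), buf.getOffset() + headerSize,
            buf.getLimit() - headerSize - checksumBytes).slice();
      }

      public static void main(String[] args) {
        PositionedByteRange block = new SimplePositionedMutableByteRange(new byte[64]);
        ByteBuffer data = dataView(block, 33, 4);
        assert data.remaining() == 64 - 33 - 4;
      }
    }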
@@ -383,7 +394,8 @@
    * is not thread-safe, because it alters the internal buffer pointer.
    */
   void sanityCheck() throws IOException {
-    buf.rewind();
+    buf.setPosition(0);
+    buf.setOffset(0);
 
     {
       BlockType blockTypeFromBuf = BlockType.read(buf);
@@ -411,17 +423,17 @@
     int hdrSize = headerSize();
     int expectedBufLimit = uncompressedSizeWithoutHeader + headerSize()
         + cksumBytes;
-    if (buf.limit() != expectedBufLimit) {
-      throw new AssertionError("Expected buffer limit " + expectedBufLimit
-          + ", got " + buf.limit());
+    if (buf.getLimit() != expectedBufLimit) {
+      throw new AssertionError("Expected buffer limit " + expectedBufLimit
+          + ", got " + buf.getLimit());
     }
 
     // We might optionally allocate HFILEBLOCK_HEADER_SIZE more bytes to read the next
     // block's header, so there are two sensible values for buffer capacity.
     int size = uncompressedSizeWithoutHeader + hdrSize + cksumBytes;
-    if (buf.capacity() != size &&
-        buf.capacity() != size + hdrSize) {
-      throw new AssertionError("Invalid buffer capacity: " + buf.capacity() +
+    if (buf.getLength() != size &&
+        buf.getLength() != size + hdrSize) {
+      throw new AssertionError("Invalid buffer capacity: " + buf.getLength() +
           ", expected " + size + " or " + (size + hdrSize));
     }
   }
@@ -437,8 +449,8 @@
         + ", prevBlockOffset=" + prevBlockOffset
         + ", dataBeginsWith="
-        + Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(),
-            Math.min(32, buf.limit() - buf.arrayOffset() - headerSize()))
+        + Bytes.toStringBinary(buf.getBytes(), buf.getOffset() + headerSize(),
+            Math.min(32, buf.getLimit() - buf.getOffset() - headerSize()))
         + ", fileOffset=" + offset;
   }
@@ -447,8 +459,8 @@
     if (onDiskSizeWithoutHeader != expectedOnDiskSizeWithoutHeader) {
       String blockInfoMsg = "Block offset: " + offset + ", data starts with: "
-          + Bytes.toStringBinary(buf.array(), buf.arrayOffset(),
-              buf.arrayOffset() + Math.min(32, buf.limit()));
+          + Bytes.toStringBinary(buf.getBytes(), buf.getOffset(),
+              buf.getOffset() + Math.min(32, buf.getLimit()));
       throw new IOException("On-disk size without header provided is "
           + expectedOnDiskSizeWithoutHeader + ", but block "
           + "header contains " + onDiskSizeWithoutHeader + ". " +
@@ -470,14 +482,13 @@
         cksumBytes + (extraBytes ? headerSize() : 0);
-    ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded);
-
+    PositionedByteRange newBuf = new SimplePositionedMutableByteRange(capacityNeeded);
     // Copy header bytes.
-    System.arraycopy(buf.array(), buf.arrayOffset(), newBuf.array(),
-        newBuf.arrayOffset(), headerSize());
+    System.arraycopy(buf.getBytes(), buf.getOffset(), newBuf.getBytes(),
+        newBuf.getOffset(), headerSize());
 
     buf = newBuf;
-    buf.limit(headerSize() + uncompressedSizeWithoutHeader + cksumBytes);
+    buf.setLimit(headerSize() + uncompressedSizeWithoutHeader + cksumBytes);
   }
 
   /** An additional sanity-check in case no compression is being used. */
@@ -515,8 +526,8 @@
    * @return a byte stream reading the data section of this block
    */
   public DataInputStream getByteStream() {
-    return new DataInputStream(new ByteArrayInputStream(buf.array(),
-        buf.arrayOffset() + headerSize(), buf.limit() - headerSize()));
+    return new DataInputStream(new ByteArrayInputStream(buf.getBytes(),
+        buf.getOffset() + headerSize(), buf.getLimit() - headerSize()));
   }
 
   @Override
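allocateBuffer() above now reads: allocate a larger mutable range, copy the header across, swap the field, and cap the limit. The same move in miniature, a sketch using only calls that appear in this patch:

    import org.apache.hadoop.hbase.util.PositionedByteRange;
    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    public class GrowRange {
      static PositionedByteRange grow(PositionedByteRange buf, int headerSize,
          int uncompressedSize, int checksumBytes, boolean extraHeader) {
        int capacityNeeded = headerSize + uncompressedSize + checksumBytes
            + (extraHeader ? headerSize : 0);
        PositionedByteRange newBuf = new SimplePositionedMutableByteRange(capacityNeeded);
        // Carry the existing header over to the new backing array.
        System.arraycopy(buf.getBytes(), buf.getOffset(), newBuf.getBytes(),
            newBuf.getOffset(), headerSize);
        newBuf.setLimit(headerSize + uncompressedSize + checksumBytes);
        return newBuf;
      }
    }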
@@ -536,7 +547,7 @@
 
     if (buf != null) {
       // Deep overhead of the byte buffer. Needs to be aligned separately.
-      size += ClassSize.align(buf.capacity() + BYTE_BUFFER_HEAP_SIZE);
+      size += ClassSize.align(buf.getLength() + BYTE_BUFFER_HEAP_SIZE);
     }
 
     return ClassSize.align(size);
@@ -1008,6 +1019,11 @@
       return ByteBuffer.wrap(uncompressedBytesWithHeader);
     }
 
+    PositionedByteRange getUncompressedByteRangeWithHeader() {
+      expectState(State.BLOCK_READY);
+      return new SimplePositionedMutableByteRange(uncompressedBytesWithHeader);
+    }
+
     private void expectState(State expectedState) {
       if (state != expectedState) {
         throw new IllegalStateException("Expected state: " + expectedState +
@@ -1051,7 +1067,7 @@
           .withIncludesTags(fileContext.isIncludesTags())
           .build();
       return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
-          getUncompressedSizeWithoutHeader(), prevOffset, getUncompressedBufferWithHeader(),
+          getUncompressedSizeWithoutHeader(), prevOffset, getUncompressedByteRangeWithHeader(),
           DONT_FILL_HEADER, startOffset,
           onDiskBytesWithHeader.length + onDiskChecksum.length, newContext);
     }
@@ -1223,9 +1239,12 @@
             + "read " + size + " bytes, but pos=" + realOffset + " after seek");
       }
 
-      if (!peekIntoNextBlock) {
+      try {
         IOUtils.readFully(istream, dest, destOffset, size);
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
       return -1;
     }
@@ -1263,7 +1282,8 @@
   private static class PrefetchedHeader {
     long offset = -1;
     byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
-    ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
+    PositionedByteRange buf = new SimplePositionedMutableByteRange(header, 0,
+        HConstants.HFILEBLOCK_HEADER_SIZE);
   }
 
   /** Reads version 2 blocks from the filesystem. */
@@ -1425,7 +1445,7 @@
       // And we also want to skip reading the header again if it has already
       // been read.
       PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get();
-      ByteBuffer headerBuf = prefetchedHeader.offset == offset ?
+      PositionedByteRange headerBuf = prefetchedHeader.offset == offset ?
           prefetchedHeader.buf : null;
 
       int nextBlockOnDiskSize = 0;
@@ -1451,10 +1471,10 @@
         if (headerBuf != null) {
           // the header has been read when reading the previous block, copy
           // to this block's header
-          System.arraycopy(headerBuf.array(),
-              headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
+          System.arraycopy(headerBuf.getBytes(),
+              headerBuf.getOffset(), onDiskBlock, 0, hdrSize);
         } else {
-          headerBuf = ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
+          headerBuf = new SimplePositionedMutableByteRange(onDiskBlock, 0, hdrSize);
         }
         // We know the total on-disk size but not the uncompressed size. Read
         // the entire block into memory, then parse the header and decompress
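The reader changes above keep the prefetched-header hand-off intact and only swap the container: a header held in a PositionedByteRange is copied into the new block's on-disk array via getBytes()/getOffset() instead of array()/arrayOffset(). A compressed sketch (33 is the v2 header size with checksums; the block size is arbitrary):

    import org.apache.hadoop.hbase.util.PositionedByteRange;
    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    public class HeaderHandoff {
      public static void main(String[] args) {
        int hdrSize = 33;
        byte[] header = new byte[hdrSize];
        PositionedByteRange headerBuf = new SimplePositionedMutableByteRange(header, 0, hdrSize);
        byte[] onDiskBlock = new byte[1024 + hdrSize];
        // The same copy the reader performs when the previous read prefetched the header.
        System.arraycopy(headerBuf.getBytes(), headerBuf.getOffset(), onDiskBlock, 0, hdrSize);
      }
    }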
@@ -1495,14 +1515,14 @@
         // operations. This might happen when we are doing the first read
         // in a series of reads or a random read, and we don't have access
         // to the block index. This is costly and should happen very rarely.
-        headerBuf = ByteBuffer.allocate(hdrSize);
-        readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(),
+        headerBuf = new SimplePositionedMutableByteRange(hdrSize);
+        readAtOffset(is, headerBuf.getBytes(), headerBuf.getOffset(),
             hdrSize, false, offset, pread);
       }
 
       b = new HFileBlock(headerBuf, this.fileContext.isUseHBaseChecksum());
       onDiskBlock = new byte[b.getOnDiskSizeWithHeader() + hdrSize];
-      System.arraycopy(headerBuf.array(),
-          headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
+      System.arraycopy(headerBuf.getBytes(),
+          headerBuf.getOffset(), onDiskBlock, 0, hdrSize);
       nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, hdrSize,
           b.getOnDiskSizeWithHeader() - hdrSize, true, offset + hdrSize,
           pread);
@@ -1541,8 +1561,8 @@
       }
       if (nextBlockOnDiskSize > 0) {
         // Copy next block's header bytes into the new block if we have them.
-        System.arraycopy(onDiskBlock, onDiskSizeWithHeader, b.buf.array(),
-            b.buf.arrayOffset() + hdrSize
+        System.arraycopy(onDiskBlock, onDiskSizeWithHeader, b.buf.getBytes(),
+            b.buf.getOffset() + hdrSize
                 + b.uncompressedSizeWithoutHeader + b.totalChecksumBytes(),
             hdrSize);
       }
@@ -1551,8 +1571,8 @@
       // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
       // contains the header of next block, so no need to set next
       // block's header in it.
-      b = new HFileBlock(ByteBuffer.wrap(onDiskBlock, 0,
-          onDiskSizeWithHeader), this.fileContext.isUseHBaseChecksum());
+      b = new HFileBlock(new SimplePositionedMutableByteRange(onDiskBlock, 0,
+          onDiskSizeWithHeader), this.fileContext.isUseHBaseChecksum());
     }
 
     b.nextBlockOnDiskSizeWithHeader = nextBlockOnDiskSize;
@@ -1599,15 +1619,26 @@
   @Override
   public int getSerializedLength() {
     if (buf != null) {
-      return this.buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE;
+      return this.buf.getLimit() + HFileBlock.EXTRA_SERIALIZATION_SPACE;
     }
     return 0;
   }
 
   @Override
   public void serialize(ByteBuffer destination) {
-    ByteBuffer dupBuf = this.buf.duplicate();
-    dupBuf.rewind();
+    PositionedByteRange dupBuf = this.buf.shallowCopy();
+    dupBuf.setPosition(0);
+    dupBuf.setOffset(0);
+    destination.put(ByteBuffer.wrap(dupBuf.getBytes(), dupBuf.getOffset(), dupBuf.getLength()));
+    serializeExtraInfo(destination);
+  }
+
+  @Override
+  public void serialize(PositionedByteRange destination) {
+    // Ensure that the caller passes a mutable byte range
+    PositionedByteRange dupBuf = this.buf.shallowCopy();
+    dupBuf.setPosition(0);
+    dupBuf.setOffset(0);
     destination.put(dupBuf);
     serializeExtraInfo(destination);
   }
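With both serialize() overloads in place, a cache can size scratch space from getSerializedLength() and let the block write itself into a positioned range, block bytes first, extra info last. A sketch against the Cacheable interface as patched here:

    import org.apache.hadoop.hbase.io.hfile.Cacheable;
    import org.apache.hadoop.hbase.util.PositionedByteRange;
    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    public class SerializeIntoRange {
      static PositionedByteRange toRange(Cacheable block) {
        PositionedByteRange dst =
            new SimplePositionedMutableByteRange(block.getSerializedLength());
        block.serialize(dst); // block data, then the EXTRA_SERIALIZATION_SPACE fields
        return dst;
      }
    }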
@@ -1619,6 +1650,14 @@
     destination.rewind();
   }
 
+  public void serializeExtraInfo(PositionedByteRange destination) {
+    destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
+    destination.putLong(this.offset);
+    destination.putInt(this.nextBlockOnDiskSizeWithHeader);
+    destination.setPosition(0);
+    destination.setOffset(0);
+  }
+
   @Override
   public CacheableDeserializer<Cacheable> getDeserializer() {
     return HFileBlock.blockDeserializer;
@@ -1659,10 +1698,10 @@
     if (this.buf.compareTo(castedComparison.buf) != 0) {
       return false;
     }
-    if (this.buf.position() != castedComparison.buf.position()) {
+    if (this.buf.getPosition() != castedComparison.buf.getPosition()) {
       return false;
     }
-    if (this.buf.limit() != castedComparison.buf.limit()) {
+    if (this.buf.getLimit() != castedComparison.buf.getLimit()) {
       return false;
     }
     return true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index e3a8118..7f8e550 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
 import org.apache.hadoop.hbase.io.hfile.BlockPriority;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
@@ -59,7 +60,6 @@ import org.apache.hadoop.hbase.io.hfile.Cacheable;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
 import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.util.ConcurrentIndex;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 5ef8cf0..2735cd2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
+import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
 
 public class CacheTestUtils {
 
@@ -312,6 +314,14 @@ public class CacheTestUtils {
     public BlockType getBlockType() {
       return BlockType.DATA;
     }
+
+    @Override
+    public void serialize(PositionedByteRange destination) {
+      destination.putInt(buf.length);
+      Thread.yield();
+      destination.put(buf);
+      destination.setPosition(0);
+    }
   }
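The test serializer above writes an int length prefix, then the payload, then parks the position back at 0. A hypothetical read-side counterpart for symmetry (readPayload is not part of the patch; it assumes the standard PBR getInt()/get(byte[]) accessors):

    import org.apache.hadoop.hbase.util.PositionedByteRange;

    public class TestPayloadCodec {
      static byte[] readPayload(PositionedByteRange src) {
        src.setPosition(0);
        int len = src.getInt(); // length prefix written by serialize()
        byte[] out = new byte[len];
        src.get(out);           // bulk get advances the position
        return out;
      }
    }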
@@ -326,10 +336,10 @@
       // declare our data size to be smaller than it by the serialization space
       // required.
-      ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize
+      PositionedByteRange cachedBuffer = new SimplePositionedMutableByteRange(blockSize
           - HFileBlock.EXTRA_SERIALIZATION_SPACE);
-      rand.nextBytes(cachedBuffer.array());
-      cachedBuffer.rewind();
+      rand.nextBytes(cachedBuffer.getBytes());
+      cachedBuffer.setPosition(0);
       int onDiskSizeWithoutHeader = blockSize
           - HFileBlock.EXTRA_SERIALIZATION_SPACE;
       int uncompressedSizeWithoutHeader = blockSize
@@ -339,7 +349,7 @@
       cachedBuffer.putInt(onDiskSizeWithoutHeader);
       cachedBuffer.putInt(uncompressedSizeWithoutHeader);
       cachedBuffer.putLong(prevBlockOffset);
-      cachedBuffer.rewind();
+      cachedBuffer.setPosition(0);
       HFileContext meta = new HFileContextBuilder()
           .withHBaseCheckSum(false)
           .withIncludesMvcc(includesMemstoreTS)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index 156bfc1..ef2602a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.Before;
@@ -130,6 +131,12 @@ public class TestCacheConfig {
     public BlockType getBlockType() {
       return BlockType.DATA;
     }
+
+    @Override
+    public void serialize(PositionedByteRange destination) {
+      // TODO Auto-generated method stub
+
+    }
   };
 
   static class MetaCacheEntry extends DataCacheEntry {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
index 1bec7f4..3bc57a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
@@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.nio.ByteBuffer;
 
 import junit.framework.TestCase;
+
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
 import org.junit.experimental.categories.Category;
 
 @Category(SmallTests.class)
@@ -140,6 +142,12 @@ public class TestCachedBlockQueue extends TestCase {
         return BlockType.DATA;
       }
 
+      @Override
+      public void serialize(PositionedByteRange destination) {
+        // TODO Auto-generated method stub
+
+      }
+
     }, accessTime, false);
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index 09561cb..cc5e858 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.compress.Compressor;
 import org.junit.Before;
@@ -794,9 +795,8 @@
         .withCompression(Algorithm.NONE)
         .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
         .withChecksumType(ChecksumType.NULL).build();
-    HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
-        HFileBlock.FILL_HEADER, -1,
-        0, meta);
+    HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1,
+        new SimplePositionedMutableByteRange(buf.array()), HFileBlock.FILL_HEADER, -1, 0, meta);
     long byteBufferExpectedSize = ClassSize.align(ClassSize.estimateBase(
         buf.getClass(), true)
         + HConstants.HFILEBLOCK_HEADER_SIZE + size);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
index 88fdb77..8c981c1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
@@ -51,6 +51,8 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
+import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.compress.Compressor;
 import org.junit.Before;
@@ -705,9 +707,10 @@ public class TestHFileBlockCompatibility {
      *
      * @return uncompressed block for caching on write in the form of a buffer
      */
-    public ByteBuffer getUncompressedBufferWithHeader() {
+    @Override
+    public PositionedByteRange getUncompressedByteRangeWithHeader() {
       byte[] b = getUncompressedDataWithHeader();
-      return ByteBuffer.wrap(b, 0, b.length);
+      return new SimplePositionedMutableByteRange(b, 0, b.length);
     }
 
     /**
@@ -737,7 +740,7 @@ public class TestHFileBlockCompatibility {
           .build();
       return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
           getUncompressedSizeWithoutHeader(), prevOffset,
-          getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset,
+          getUncompressedByteRangeWithHeader(), DONT_FILL_HEADER, startOffset,
           getOnDiskSizeWithoutHeader(), meta);
     }
   }
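The test-side constructions above reduce to one recipe: wrap the raw bytes in a SimplePositionedMutableByteRange and hand that to the package-private HFileBlock constructor. A sketch of a shared helper the tests could use, placed in the io.hfile package so the constructor is visible; the name and sizes are illustrative:

    package org.apache.hadoop.hbase.io.hfile;

    import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

    class TestBlockFactory {
      static HFileBlock dataBlock(byte[] bytes, int size, HFileContext meta) {
        return new HFileBlock(BlockType.DATA, size, size, -1,
            new SimplePositionedMutableByteRange(bytes), HFileBlock.FILL_HEADER, -1, 0, meta);
      }
    }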
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
index 3f2c84b..f8c6891 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.BufferGrabbingByteArrayOutputStream;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
+import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
 import org.apache.hadoop.hbase.util.test.RedundantKVGenerator;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -113,10 +115,10 @@ public class TestHFileDataBlockEncoder {
     List<KeyValue> kvs = generator.generateTestKeyValues(60, useTags);
     ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs,
         includesMemstoreTS);
     int size = keyValues.limit();
-    ByteBuffer buf = ByteBuffer.allocate(size + headerSize);
-    buf.position(headerSize);
+    PositionedByteRange buf = new SimplePositionedMutableByteRange(size + headerSize);
+    buf.setPosition(headerSize);
     keyValues.rewind();
-    buf.put(keyValues);
+    buf.put(keyValues.array());
     HFileContext hfileContext = new HFileContextBuilder().withHBaseCheckSum(false)
         .withIncludesMvcc(includesMemstoreTS)
         .withIncludesTags(useTags)
@@ -159,10 +161,10 @@ public class TestHFileDataBlockEncoder {
   private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
     ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs,
         includesMemstoreTS);
     int size = keyValues.limit();
-    ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
-    buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
+    PositionedByteRange buf = new SimplePositionedMutableByteRange(size + HConstants.HFILEBLOCK_HEADER_SIZE);
+    buf.setPosition(HConstants.HFILEBLOCK_HEADER_SIZE);
     keyValues.rewind();
-    buf.put(keyValues);
+    buf.put(keyValues.array());
     HFileContext meta = new HFileContextBuilder()
         .withIncludesMvcc(includesMemstoreTS)
         .withIncludesTags(useTag)
@@ -196,8 +198,9 @@ public class TestHFileDataBlockEncoder {
     blockEncoder.endBlockEncoding(context, dos, stream.getBuffer(), BlockType.DATA);
     byte[] encodedBytes = baos.toByteArray();
     size = encodedBytes.length - block.getDummyHeaderForVersion().length;
-    return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
-        HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
+    return new HFileBlock(context.getBlockType(), size, size, -1,
+        new SimplePositionedMutableByteRange(encodedBytes), HFileBlock.FILL_HEADER, 0,
+        block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index fa48676..abb34f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -778,6 +779,12 @@ public class TestLruBlockCache {
       return BlockType.DATA;
     }
 
+    @Override
+    public void serialize(PositionedByteRange destination) {
+      // TODO Auto-generated method stub
+
+    }
+
   }
 }
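The auto-generated stubs left as TODOs in TestCacheConfig, TestCachedBlockQueue, and TestLruBlockCache appear to be exercised only by in-memory caches that never call serialize, so empty bodies should be harmless. If they ever need real behavior, the CacheTestUtils style above (length prefix, payload, position reset) is the natural fill-in; a sketch of that shared shape, illustrative only:

    import org.apache.hadoop.hbase.util.PositionedByteRange;

    final class StubSerializer {
      static void serializeInto(byte[] payload, PositionedByteRange destination) {
        destination.putInt(payload.length);
        destination.put(payload);
        destination.setPosition(0);
      }
    }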