diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index b8303b8..d476679 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1402,7 +1402,6 @@ public class HFileBlock implements Cacheable {
   private static class PrefetchedHeader {
     long offset = -1;
     byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
-    final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
   }
 
   /** Reads version 2 blocks from the filesystem. */
@@ -1416,13 +1415,7 @@ public class HFileBlock implements Cacheable {
     /** Default context used when BlockType != {@link BlockType#ENCODED_DATA}. */
     private final HFileBlockDefaultDecodingContext defaultDecodingCtx;
 
-    private ThreadLocal<PrefetchedHeader> prefetchedHeaderForThread =
-        new ThreadLocal<PrefetchedHeader>() {
-          @Override
-          public PrefetchedHeader initialValue() {
-            return new PrefetchedHeader();
-          }
-        };
+    private final PrefetchedHeader prefetchedHeader;
 
     public FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path,
         HFileContext fileContext) throws IOException {
@@ -1432,6 +1425,7 @@ public class HFileBlock implements Cacheable {
       this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum());
       defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext);
       encodedBlockDecodingCtx = defaultDecodingCtx;
+      this.prefetchedHeader = new PrefetchedHeader();
     }
 
     /**
@@ -1566,11 +1560,14 @@ public class HFileBlock implements Cacheable {
       // read this block's header as part of the previous read's look-ahead.
       // And we also want to skip reading the header again if it has already
      // been read.
-      // TODO: How often does this optimization fire? Has to be same thread so the thread local
-      // is pertinent and we have to be reading next block as in a big scan.
-      PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get();
-      ByteBuffer headerBuf = prefetchedHeader.offset == offset? prefetchedHeader.buf: null;
-
+      ByteBuffer headerBuf = null;
+      synchronized (prefetchedHeader) {
+        if(prefetchedHeader.offset == offset) {
+          byte[] buf = new byte[hdrSize];
+          System.arraycopy(prefetchedHeader.header, 0, buf, 0, hdrSize);
+          headerBuf = ByteBuffer.wrap(buf);
+        }
+      }
       // Allocate enough space to fit the next block's header too.
       int nextBlockOnDiskSize = 0;
       byte[] onDiskBlock = null;
@@ -1671,12 +1668,13 @@ public class HFileBlock implements Cacheable {
       b.nextBlockOnDiskSizeWithHeader = nextBlockOnDiskSize;
 
-      // Set prefetched header
-      if (b.hasNextBlockHeader()) {
-        prefetchedHeader.offset = offset + b.getOnDiskSizeWithHeader();
-        System.arraycopy(onDiskBlock, onDiskSizeWithHeader, prefetchedHeader.header, 0, hdrSize);
+      synchronized (prefetchedHeader) {
+        // Set prefetched header
+        if (b.hasNextBlockHeader()) {
+          prefetchedHeader.offset = offset + b.getOnDiskSizeWithHeader();
+          System.arraycopy(onDiskBlock, onDiskSizeWithHeader, prefetchedHeader.header, 0, hdrSize);
+        }
       }
-
       b.offset = offset;
       b.fileContext.setIncludesTags(this.fileContext.isIncludesTags());
       b.fileContext.setIncludesMvcc(this.fileContext.isIncludesMvcc());