diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 6079700..02c4a04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -24,6 +24,7 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -47,6 +48,9 @@ import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -86,6 +90,7 @@ import com.google.common.base.Preconditions;
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="HE_EQUALS_USE_HASHCODE",
   justification="Fix!!! Fine for now bug FIXXXXXXX!!!!")
 public class HFileBlock implements Cacheable {
+  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 
   /**
    * On a checksum failure on a Reader, these many suceeding read
@@ -1370,13 +1375,17 @@ public class HFileBlock implements Cacheable {
     final FSReader owner = this; // handle for inner class
     return new BlockIterator() {
       private long offset = startOffset;
+      // Cache the length of the next block; the current block has the next block's length in it.
+      private long length = -1;
 
       @Override
       public HFileBlock nextBlock() throws IOException {
-        if (offset >= endOffset)
+        if (offset >= endOffset) {
           return null;
-        HFileBlock b = readBlockData(offset, -1, -1, false);
+        }
+        HFileBlock b = readBlockData(offset, length, -1, false);
         offset += b.getOnDiskSizeWithHeader();
+        length = b.getNextBlockOnDiskSizeWithHeader();
         return b.unpack(fileContext, owner);
       }
 
@@ -1458,13 +1467,24 @@ public class HFileBlock implements Cacheable {
   }
 
   /**
-   * We always prefetch the header of the next block, so that we know its
-   * on-disk size in advance and can read it in one operation.
+   * Data-structure used to cache the header of the NEXT block. Only useful if the next
+   * read to come through here is for the next block in sequence in this file.
+   *
+   * When we read, we read the current block and the next block's header. We do this so
+   * we have the length of the next block to read if the hfile index is not available (rare).
+   * TODO: Review!! This trick of reading the next block's header is a pain, complicates our
+   * read path, and I don't think it is needed given how rare it is that we don't have the
+   * block index (it is 'normally' present, gotten from the hfile index). FIX!!!
    */
   private static class PrefetchedHeader {
     long offset = -1;
-    byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
+    byte [] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
    final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
+
+    @Override
+    public String toString() {
+      return "offset=" + this.offset + ", header=" + Bytes.toStringBinary(header);
+    }
   }
 
   /** Reads version 2 blocks from the filesystem. */
@@ -1478,13 +1498,14 @@ public class HFileBlock implements Cacheable {
 
     /** Default context used when BlockType != {@link BlockType#ENCODED_DATA}. */
     private final HFileBlockDefaultDecodingContext defaultDecodingCtx;
 
-    private ThreadLocal<PrefetchedHeader> prefetchedHeaderForThread =
-        new ThreadLocal<PrefetchedHeader>() {
-          @Override
-          public PrefetchedHeader initialValue() {
-            return new PrefetchedHeader();
-          }
-        };
+    /**
+     * Cache of the header of the NEXT block after this one. Check it is indeed the next
+     * block's header before using it. TODO: Review. This overread into the next block to
+     * fetch the next block's header seems unnecessary given we usually get the block size
+     * from the hfile index. Review!
+     */
+    private AtomicReference<PrefetchedHeader> prefetchedHeader =
+        new AtomicReference<PrefetchedHeader>(new PrefetchedHeader());
 
     public FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path,
         HFileContext fileContext) throws IOException {
@@ -1505,17 +1526,6 @@ public class HFileBlock implements Cacheable {
       this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext);
     }
 
-    /**
-     * Reads a version 2 block (version 1 blocks not supported and not expected). Tries to do as
-     * little memory allocation as possible, using the provided on-disk size.
-     *
-     * @param offset the offset in the stream to read at
-     * @param onDiskSizeWithHeaderL the on-disk size of the block, including
-     *          the header, or -1 if unknown
-     * @param uncompressedSize the uncompressed size of the the block. Always
-     *          expected to be -1. This parameter is only used in version 1.
-     * @param pread whether to use a positional read
-     */
     @Override
     public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, int uncompressedSize,
        boolean pread)
@@ -1584,13 +1594,42 @@ public class HFileBlock implements Cacheable {
     }
 
     /**
+     * Check the atomic reference cache for this block's header. The cache is only good
+     * if the next read to come through here is next in sequence in the file. We read the
+     * next block's header on the tail of reading the previous block to save a seek.
+     * Otherwise, we have to do a seek to read the header before we can pull in the block,
+     * OR we have to back up the stream because we over-read (the next block's header).
+     * @see PrefetchedHeader
+     * @return The cached block header or null if not found.
+     * @see #cacheNextBlockHeader(long, byte[], int, int)
+     */
+    private ByteBuffer getCachedHeader(final long offset) {
+      PrefetchedHeader ph = this.prefetchedHeader.get();
+      return ph != null && ph.offset == offset ? ph.buf : null;
+    }
+
+    /**
+     * Save away the next block's header in the atomic reference.
+     * @see #getCachedHeader(long)
+     * @see PrefetchedHeader
+     */
+    private void cacheNextBlockHeader(final long offset,
+        final byte [] header, final int headerOffset, final int headerLength) {
+      PrefetchedHeader ph = new PrefetchedHeader();
+      ph.offset = offset;
+      System.arraycopy(header, headerOffset, ph.header, 0, headerLength);
+      this.prefetchedHeader.set(ph);
+    }
+
+    /**
      * Reads a version 2 block.
      *
-     * @param offset the offset in the stream to read at
-     * @param onDiskSizeWithHeader the on-disk size of the block, including
-     *          the header, or -1 if unknown
-     * @param uncompressedSize the uncompressed size of the the block. Always
-     *          expected to be -1. This parameter is only used in version 1.
+     * @param offset the offset in the stream to read at.
+     * @param onDiskSizeWithHeaderL the on-disk size of the block, including
+     *          the header and checksums if present, or -1 if unknown (as a long). Can be -1
+     *          if we are doing raw iteration of blocks as when loading up file metadata; i.e.
+     *          the first read of a new file (TODO: Fix! See HBASE-17072). Usually it is known,
+     *          gotten from the hfile index.
     * @param pread whether to use a positional read
     * @param verifyChecksum Whether to use HBase checksums.
     *        If HBase checksum is switched off, then use HDFS checksum.
@@ -1619,15 +1658,10 @@ public class HFileBlock implements Cacheable {
             + offset + ", uncompressedSize=" + uncompressedSize + ")");
       }
 
-      // See if we can avoid reading the header. This is desirable, because
-      // we will not incur a backward seek operation if we have already
-      // read this block's header as part of the previous read's look-ahead.
-      // And we also want to skip reading the header again if it has already
-      // been read.
-      // TODO: How often does this optimization fire? Has to be same thread so the thread local
-      // is pertinent and we have to be reading next block as in a big scan.
-      PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get();
-      ByteBuffer headerBuf = prefetchedHeader.offset == offset? prefetchedHeader.buf: null;
+      // Try and get the cached header. It will serve us in the rare case where
+      // onDiskSizeWithHeaderL is -1, and it saves us having to seek the stream backwards
+      // to reread the header we read the last time through here.
+      ByteBuffer headerBuf = getCachedHeader(offset);
 
       // Allocate enough space to fit the next block's header too.
       int nextBlockOnDiskSize = 0;
@@ -1665,22 +1699,17 @@ public class HFileBlock implements Cacheable {
         validateOnDiskSizeWithoutHeader(expectedOnDiskSizeWithoutHeader,
             actualOnDiskSizeWithoutHeader, headerBuf, offset);
       } else {
-        // Check headerBuf to see if we have read this block's header as part of
-        // reading the previous block. This is an optimization of peeking into
-        // the next block's header (e.g.this block's header) when reading the
-        // previous block. This is the faster and more preferable case. If the
-        // header is already there, don't read the header again.
-
-        // Unfortunately, we still have to do a separate read operation to
-        // read the header.
+        // We were not passed the block size. We need to get it from the header. If the header
+        // was not in the cache, we need to seek to pull it in. This is costly and should happen
+        // very rarely. Currently it happens on open of an hfile reader, where we read the
+        // trailer blocks to get the indices. Otherwise, we read block sizes out of the hfile
+        // index. To check, enable TRACE in this file and you'll get a stack trace logged every
+        // time we seek. See HBASE-17072 for more detail.
         if (headerBuf == null) {
-          // From the header, determine the on-disk size of the given hfile
-          // block, and read the remaining data, thereby incurring two read
-          // operations. This might happen when we are doing the first read
-          // in a series of reads or a random read, and we don't have access
-          // to the block index. This is costly and should happen very rarely.
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Extra seek to get block size!", new RuntimeException());
+          }
           headerBuf = ByteBuffer.allocate(hdrSize);
-          // headerBuf is HBB
           readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
               offset, pread);
         }
@@ -1712,8 +1741,8 @@ public class HFileBlock implements Cacheable {
 
       // Set prefetched header
       if (b.hasNextBlockHeader()) {
-        prefetchedHeader.offset = offset + b.getOnDiskSizeWithHeader();
-        System.arraycopy(onDiskBlock, onDiskSizeWithHeader, prefetchedHeader.header, 0, hdrSize);
+        cacheNextBlockHeader(offset + b.getOnDiskSizeWithHeader(),
+          onDiskBlock, onDiskSizeWithHeader, hdrSize);
       }
 
       b.offset = offset;
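The heart of the patch above is the swap from a per-thread ThreadLocal<PrefetchedHeader> to a single AtomicReference<PrefetchedHeader> shared by all readers: cacheNextBlockHeader() builds a fresh PrefetchedHeader and publishes it with set(), so a reader can never observe a half-written header; a stale entry simply fails the offset check in getCachedHeader() and falls through to a normal header read. Below is a minimal sketch of that single-slot publish-and-check pattern; the class, method, and constant names are stand-ins, not the HBase API.

```java
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicReference;

// Minimal sketch of the single-slot header cache the patch switches to: an
// immutable snapshot published through an AtomicReference instead of one
// mutable PrefetchedHeader per thread in a ThreadLocal.
final class SingleSlotHeaderCache {
  private static final int HEADER_SIZE = 33; // stand-in for HConstants.HFILEBLOCK_HEADER_SIZE

  // Immutable snapshot: never mutated after publication, so concurrent
  // readers can use it without locking.
  private static final class Entry {
    final long offset;
    final byte[] header;

    Entry(long offset, byte[] header) {
      this.offset = offset;
      this.header = header;
    }
  }

  private final AtomicReference<Entry> slot = new AtomicReference<Entry>();

  /** Analogue of getCachedHeader(): a hit only if the cached offset matches. */
  ByteBuffer get(long offset) {
    Entry e = slot.get();
    return e != null && e.offset == offset ? ByteBuffer.wrap(e.header) : null;
  }

  /** Analogue of cacheNextBlockHeader(): copy the bytes, then publish a fresh snapshot. */
  void put(long offset, byte[] src, int srcOffset) {
    byte[] copy = new byte[HEADER_SIZE];
    System.arraycopy(src, srcOffset, copy, 0, HEADER_SIZE);
    slot.set(new Entry(offset, copy));
  }
}
```

Under contention the worst case is a wasted prefetch (one thread's snapshot overwrites another's), never corruption, which is why no lock is needed and why the cache still hits in the common sequential-scan case the javadoc above describes.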

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 6b2664c..b88ef01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -112,7 +112,7 @@ public class HFileBlockIndex {
    * blocks at all other levels will be cached in the LRU cache in practice,
    * although this API does not enforce that.
    *
-   * All non-root (leaf and intermediate) index blocks contain what we call a
+   * <p>All non-root (leaf and intermediate) index blocks contain what we call a
    * "secondary index": an array of offsets to the entries within the block.
    * This allows us to do binary search for the entry corresponding to the
    * given key without having to deserialize the block.
@@ -202,17 +202,12 @@ public class HFileBlockIndex {
     }
 
     /**
-     * Return the BlockWithScanInfo which contains the DataBlock with other scan
-     * info such as nextIndexedKey. This function will only be called when the
-     * HFile version is larger than 1.
+     * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with
+     * other scan info such as the key that starts the next HFileBlock. This function will only
+     * be called when the HFile version is larger than 1.
      *
-     * @param key
-     *          the key we are looking for
-     * @param currentBlock
-     *          the current block, to avoid re-reading the same block
-     * @param cacheBlocks
-     * @param pread
-     * @param isCompaction
+     * @param key the key we are looking for
+     * @param currentBlock the current block, to avoid re-reading the same block
      * @param expectedDataBlockEncoding the data block encoding the caller is
      *          expecting the data block to be in, or null to not perform this
      *          check and return the block irrespective of the encoding.
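Looking back at the BlockIterator hunk at the top of the HFileBlock.java diff: it carries the next block's on-disk size, learned from the tail of the current block, into the following readBlockData() call, so after the first block a raw sequential scan rarely has to pass -1 for the size. A sketch of that carry-the-length pattern follows; Block and BlockSource are assumed stand-in types, not the HBase interfaces.

```java
import java.io.IOException;

// Sketch of the iterator change: remember the on-disk size of the NEXT block,
// learned while reading the current one, and pass it to the next read so the
// reader does not have to fetch a header separately just to size the block.
final class SequentialBlockScanner {
  // Stand-in for HFileBlock: knows its own on-disk size and, if its tail
  // header was read, the on-disk size of the block that follows it.
  interface Block {
    long onDiskSizeWithHeader();
    long nextBlockOnDiskSize(); // -1 if unknown
  }

  // Stand-in for FSReader.readBlockData(): sizeHint is -1 when unknown, in
  // which case the implementation must first read the header to size the block.
  interface BlockSource {
    Block read(long offset, long sizeHint) throws IOException;
  }

  private final BlockSource source;
  private final long endOffset;
  private long offset;
  private long nextSize = -1; // unknown until the first block tells us

  SequentialBlockScanner(BlockSource source, long startOffset, long endOffset) {
    this.source = source;
    this.offset = startOffset;
    this.endOffset = endOffset;
  }

  /** Returns the next block, or null once endOffset is reached. */
  Block next() throws IOException {
    if (offset >= endOffset) {
      return null;
    }
    Block b = source.read(offset, nextSize);
    offset += b.onDiskSizeWithHeader();
    nextSize = b.nextBlockOnDiskSize(); // carry forward for the following call
    return b;
  }
}
```

Only the very first read goes in blind with -1; that first header-sizing seek is the case the new TRACE logging is meant to surface, per the HBASE-17072 TODOs in the patch.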