Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java	(revision 1541258)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java	(working copy)
@@ -155,6 +155,8 @@
         ScanType.USER_SCAN, Long.MAX_VALUE, HConstants.LATEST_TIMESTAMP,
         oldestUnexpiredTS);
 
+    this.store.addChangedReaderObserver(this);
+
     // Pass columns to try to filter out unnecessary StoreFiles.
     List<KeyValueScanner> scanners = getScannersNoCompaction();
 
@@ -184,8 +186,6 @@
 
     // Combine all seeked scanners with a heap
     heap = new KeyValueHeap(scanners, store.getComparator());
-
-    this.store.addChangedReaderObserver(this);
   }
 
   /**
@@ -235,6 +235,8 @@
           earliestPutTs, oldestUnexpiredTS, dropDeletesFromRow, dropDeletesToRow);
     }
 
+    this.store.addChangedReaderObserver(this);
+
     // Filter the list of scanners using Bloom filters, time range, TTL, etc.
     scanners = selectScannersFrom(scanners);
 
@@ -280,6 +282,10 @@
     this.matcher = new ScanQueryMatcher(scan, scanInfo, columns, scanType,
         Long.MAX_VALUE, earliestPutTs, oldestUnexpiredTS);
 
+    // In unit tests, the store could be null
+    if (this.store != null) {
+      this.store.addChangedReaderObserver(this);
+    }
     // Seek all scanners to the initial key
     if (!isParallelSeekEnabled) {
       for (KeyValueScanner scanner : scanners) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java	(revision 1541258)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java	(working copy)
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FileLink;
+import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -48,9 +49,7 @@
  * 2.2) If HBase checksum can be used, we'll open {@link #streamNoFsChecksum},
  *   and close {@link #stream}. User MUST call prepareForBlockReader for that to happen;
  *   if they don't, (2.1) will be the default.
- * 3) The users can call {@link #shouldUseHBaseChecksum()}, and pass its result to
- *   {@link #getStream(boolean)} to get stream (if Java had out/pointer params we could
- *   return both in one call). This stream is guaranteed to be set.
+ * 3) The users can call {@link #getStream()} to get the stream. This stream is guaranteed to be set.
  * 4) The first time HBase checksum fails, one would call {@link #fallbackToFsChecksum(int)}.
  *   That will take lock, and open {@link #stream}. While this is going on, others will
  *   continue to use the old stream; if they also want to fall back, they'll also call
@@ -139,19 +138,14 @@
   }
 
   /**
-   * @return Whether we are presently using HBase checksum.
-   */
-  public boolean shouldUseHBaseChecksum() {
-    return this.useHBaseChecksum;
-  }
-
-  /**
    * Get the stream to use. Thread-safe.
-   * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned
-   *   at some point in the past, otherwise the result is undefined.
+   * @return a Pair whose first member is the input stream to read from, and whose
+   *   second member indicates whether we are presently using HBase checksum.
    */
-  public FSDataInputStream getStream(boolean useHBaseChecksum) {
-    return useHBaseChecksum ? this.streamNoFsChecksum : this.stream;
+  public Pair<FSDataInputStream, Boolean> getStream() {
+    return useHBaseChecksum ?
+        new Pair<FSDataInputStream, Boolean>(this.streamNoFsChecksum, useHBaseChecksum) :
+        new Pair<FSDataInputStream, Boolean>(this.stream, useHBaseChecksum);
   }
 
   /**
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java	(revision 1541258)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java	(working copy)
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CompoundBloomFilter;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;
@@ -1312,8 +1313,9 @@
       // thread-safe but the one constaint is that if we decide
       // to skip hbase checksum verification then we are
      // guaranteed to use hdfs checksum verification.
-      boolean doVerificationThruHBaseChecksum = streamWrapper.shouldUseHBaseChecksum();
-      FSDataInputStream is = streamWrapper.getStream(doVerificationThruHBaseChecksum);
+      Pair<FSDataInputStream, Boolean> pair = streamWrapper.getStream();
+      boolean doVerificationThruHBaseChecksum = pair.getSecond();
+      FSDataInputStream is = pair.getFirst();
 
       HFileBlock blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL,
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(revision 1541258)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(working copy)
@@ -65,6 +65,7 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Writable;
 
 import com.google.protobuf.ZeroCopyLiteralByteString;
@@ -520,9 +521,10 @@
       HFileSystem hfs) throws IOException {
     FixedFileTrailer trailer = null;
     try {
-      boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
+      Pair<FSDataInputStream, Boolean> pair = fsdis.getStream();
+      boolean isHBaseChecksum = pair.getSecond();
       assert !isHBaseChecksum; // Initially we must read with FS checksum.
-      trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
+      trailer = FixedFileTrailer.readFromStream(pair.getFirst(), size);
       switch (trailer.getMajorVersion()) {
         case 2:
           return new HFileReaderV2(
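
The FSDataInputStreamWrapper hunks above replace the two-step
shouldUseHBaseChecksum()/getStream(boolean) protocol with a single getStream()
call that returns the stream and the checksum flag together, so a concurrent
fallbackToFsChecksum(int) can no longer slip in between the two reads and hand
the caller a mismatched pair. Below is a minimal sketch of the new calling
convention; the ReadExample class and its readAt() helper are hypothetical,
while getStream(), Pair, and fallbackToFsChecksum(int) are the APIs touched by
the patch above.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.util.Pair;

public final class ReadExample {
  /**
   * Reads one byte at the given offset using whichever checksum mode the
   * wrapper is currently in. Hypothetical helper, for illustration only.
   */
  static int readAt(FSDataInputStreamWrapper wrapper, long offset) throws IOException {
    // One call yields a consistent snapshot: the flag always describes the stream.
    Pair<FSDataInputStream, Boolean> pair = wrapper.getStream();
    FSDataInputStream is = pair.getFirst();
    boolean useHBaseChecksum = pair.getSecond();
    // A real block reader would verify HBase checksums when useHBaseChecksum is
    // true, and call wrapper.fallbackToFsChecksum(int) on the first failure.
    is.seek(offset);
    return is.read();
  }
}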