Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java	(revision 1538859)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java	(working copy)
@@ -1170,20 +1170,22 @@
    * Does a positional read or a seek and read into the given buffer. Returns
    * the on-disk size of the next block, or -1 if it could not be determined.
    *
+   * @param istream The input source of data
    * @param dest destination buffer
    * @param destOffset offset in the destination buffer
    * @param size size of the block to be read
    * @param peekIntoNextBlock whether to read the next block's on-disk size
    * @param fileOffset position in the stream to read at
    * @param pread whether we should do a positional read
-   * @param istream The input source of data
+   * @param verifyChecksum whether to use HBase checksums;
+   *          if HBase checksums are switched off, the HDFS checksum is used
    * @return the on-disk size of the next block with header size included, or
    *         -1 if it could not be determined
    * @throws IOException
    */
   protected int readAtOffset(FSDataInputStream istream,
       byte[] dest, int destOffset, int size,
-      boolean peekIntoNextBlock, long fileOffset, boolean pread)
+      boolean peekIntoNextBlock, long fileOffset, boolean pread, boolean verifyChecksum)
       throws IOException {
     if (peekIntoNextBlock &&
         destOffset + size + hdrSize > dest.length) {
@@ -1197,6 +1199,10 @@
     if (!pread && streamLock.tryLock()) {
       // Seek + read. Better for scanning.
       try {
+        if (istream == null) {
+          throw new IOException("stream closed. size=" + size + "; verifyChecksum="
+              + verifyChecksum + "; pread=" + pread);
+        }
         istream.seek(fileOffset);

         long realOffset = istream.getPos();
@@ -1218,6 +1224,10 @@
         streamLock.unlock();
       }
     } else {
+      if (istream == null) {
+        throw new IOException("stream closed. size=" + size + "; verifyChecksum="
+            + verifyChecksum + "; pread=" + pread);
+      }
       // Positional read. Better for random reads; or when the
       // streamLock is already locked.
       int extraSize = peekIntoNextBlock ? hdrSize : 0;
@@ -1435,7 +1445,7 @@
       onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize];
       nextBlockOnDiskSize = readAtOffset(is, onDiskBlock,
           preReadHeaderSize, onDiskSizeWithHeader - preReadHeaderSize,
-          true, offset + preReadHeaderSize, pread);
+          true, offset + preReadHeaderSize, pread, verifyChecksum);
       if (headerBuf != null) {
         // the header has been read when reading the previous block, copy
         // to this block's header
@@ -1485,7 +1495,7 @@
         // to the block index. This is costly and should happen very rarely.
         headerBuf = ByteBuffer.allocate(hdrSize);
         readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(),
-            hdrSize, false, offset, pread);
+            hdrSize, false, offset, pread, verifyChecksum);
       }
       b = new HFileBlock(headerBuf, this.fileContext.shouldUseHBaseChecksum());
       onDiskBlock = new byte[b.getOnDiskSizeWithHeader() + hdrSize];
@@ -1493,7 +1503,7 @@
           headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
       nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, hdrSize, b.getOnDiskSizeWithHeader()
-          - hdrSize, true, offset + hdrSize, pread);
+          - hdrSize, true, offset + hdrSize, pread, verifyChecksum);
       onDiskSizeWithHeader = b.onDiskSizeWithoutHeader + hdrSize;
     }

     Algorithm compressAlgo = fileContext.getCompression();
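
Reviewer note: for context, below is a minimal, self-contained sketch of the guard pattern this patch applies; it is not HBase code. SeekableStream and BlockReader are hypothetical stand-ins for FSDataInputStream and the reader class in HFileBlock. The point is that the caller-supplied stream reference may be null after a concurrent close, so each branch checks it before use and fails fast with a descriptive IOException (carrying size, verifyChecksum, and pread) instead of a bare NullPointerException.

import java.io.IOException;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical stand-in for FSDataInputStream: supports both a stateful
// seek+read and a stateless positional read.
interface SeekableStream {
  void seek(long pos) throws IOException;
  int read(byte[] buf, int off, int len) throws IOException;           // read at current position
  int read(long pos, byte[] buf, int off, int len) throws IOException; // positional read
}

class BlockReader {
  // Serializes seek+read callers; positional reads take no lock.
  private final Lock streamLock = new ReentrantLock();

  // Mirrors the shape of the patched readAtOffset(): the stream may have
  // been closed (nulled) concurrently, so both branches guard it.
  int readAtOffset(SeekableStream istream, byte[] dest, int destOffset,
      int size, long fileOffset, boolean pread, boolean verifyChecksum)
      throws IOException {
    if (!pread && streamLock.tryLock()) {
      // Seek + read. Better for scanning.
      try {
        if (istream == null) {
          throw new IOException("stream closed. size=" + size + "; verifyChecksum="
              + verifyChecksum + "; pread=" + pread);
        }
        istream.seek(fileOffset);
        return istream.read(dest, destOffset, size);
      } finally {
        streamLock.unlock();
      }
    } else {
      // Positional read; also taken when streamLock is contended.
      if (istream == null) {
        throw new IOException("stream closed. size=" + size + "; verifyChecksum="
            + verifyChecksum + "; pread=" + pread);
      }
      return istream.read(fileOffset, dest, destOffset, size);
    }
  }
}

The guard is duplicated because either branch can run: a pread call, or any call that loses the tryLock() race, bypasses the locked branch entirely, so a single check inside the try block would not cover the positional-read path.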