diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index b8303b8..ce382d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -27,10 +27,12 @@ import java.nio.ByteBuffer;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -45,6 +47,8 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;
@@ -110,7 +114,6 @@ public class HFileBlock implements Cacheable {
   // meta.usesHBaseChecksum+offset+nextBlockOnDiskSizeWithHeader
   public static final int EXTRA_SERIALIZATION_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT
       + Bytes.SIZEOF_LONG;
-
   /**
   * Each checksum value is an integer that can be stored in 4 bytes.
   */
@@ -1413,6 +1416,14 @@ public class HFileBlock implements Cacheable {
 
     private HFileBlockDecodingContext encodedBlockDecodingCtx;
 
+    private final int slowReadNs;
+
+    /**
+     * conf object
+     */
+    protected final Configuration conf;
+
+    private static final int DEFAULT_SLOW_READ_TIME_MS = 100; // in ms
     /** Default context used when BlockType != {@link BlockType#ENCODED_DATA}. */
     private final HFileBlockDefaultDecodingContext defaultDecodingCtx;
 
@@ -1432,6 +1443,11 @@ public class HFileBlock implements Cacheable {
       this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum());
       defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext);
       encodedBlockDecodingCtx = defaultDecodingCtx;
+
+      this.conf = HBaseConfiguration.create();
+      this.slowReadNs =
+          1000000 * conf.getInt("hbase.regionserver.hlog.slowsync.ms",
+              DEFAULT_SLOW_READ_TIME_MS);
     }
 
     /**
@@ -1560,6 +1576,8 @@ public class HFileBlock implements Cacheable {
             + offset + ", uncompressedSize=" + uncompressedSize + ")");
       }
 
+      long startNano = System.nanoTime();
+
       int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL;
       // See if we can avoid reading the header. This is desirable, because
       // we will not incur a backward seek operation if we have already
@@ -1680,6 +1698,27 @@ public class HFileBlock implements Cacheable {
       b.offset = offset;
       b.fileContext.setIncludesTags(this.fileContext.isIncludesTags());
       b.fileContext.setIncludesMvcc(this.fileContext.isIncludesMvcc());
+
+
+      long timeInNanos = System.nanoTime() - startNano;
+      if (timeInNanos > this.slowReadNs) {
+        try {
+          DFSInputStream dfsInputStream = (DFSInputStream) is.getWrappedStream();
+          DatanodeInfo datanodeInfo = dfsInputStream.getCurrentDatanode();
+
+          String msg = new StringBuilder().append("Slow read block: ")
+              .append(timeInNanos / 1000000).append(" ms")
+              .append(", block size: ")
+              .append(onDiskSizeWithHeader)
+              .append(", uncompressedSize=")
+              .append(uncompressedSize)
+              .append(", datanode: ").append(datanodeInfo).toString();
+          HFile.LOG.warn(msg);
+
+        } catch (Exception e) {
+          HFile.LOG.warn("Failed to log slow read block details", e);
+        }
+      }
       return b;
     }
 
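For reference, the pattern the patch applies is: convert a millisecond threshold to nanoseconds once at construction time, time the read with System.nanoTime(), and warn when the elapsed time exceeds the threshold. The sketch below is an illustration only, not part of the patch; the class and names (SlowOperationGuard, thresholdMs, the Callable wrapper, and the "what" label) are hypothetical, and it assumes commons-logging, which HFileBlock already uses via HFile.LOG.

import java.util.concurrent.Callable;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/** Illustration only: times an operation and warns when it runs longer than a threshold. */
public class SlowOperationGuard {
  private static final Log LOG = LogFactory.getLog(SlowOperationGuard.class);

  private final long thresholdNs;

  public SlowOperationGuard(long thresholdMs) {
    // Convert once up front, as the patch does when it computes slowReadNs.
    this.thresholdNs = thresholdMs * 1000000L;
  }

  public <T> T time(String what, Callable<T> op) throws Exception {
    long startNano = System.nanoTime();
    try {
      return op.call();
    } finally {
      long elapsedNs = System.nanoTime() - startNano;
      if (elapsedNs > thresholdNs) {
        // Divide into milliseconds only on the slow path, so the common case stays cheap.
        LOG.warn("Slow " + what + ": " + (elapsedNs / 1000000) + " ms");
      }
    }
  }
}

Comparing in nanoseconds against a precomputed threshold keeps the per-read overhead to two System.nanoTime() calls and one comparison; the conversion to milliseconds happens only when a warning is actually emitted, which matches how the hunks above structure the check.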