From 639b867f20d1e0a6c0fda192eef91df6a8769ada Mon Sep 17 00:00:00 2001
From: Andrew Purtell
Date: Fri, 26 Jun 2020 17:11:32 -0700
Subject: [PATCH] W-7665966 Instrument low level scan details

Reason: Debugging
Test Plan: Unit tests, manual verification
---
 .../hadoop/hbase/io/hfile/HFileBlock.java     |  29 ++++-
 .../hbase/io/hfile/HFileReaderImpl.java       | 122 ++++++++++++++++++
 .../apache/hadoop/hbase/ipc/CallRunner.java   |   4 +
 .../org/apache/hadoop/hbase/ipc/RpcCall.java  |   5 +
 .../apache/hadoop/hbase/ipc/ServerCall.java   |  96 ++++++++++++++
 .../hbase/regionserver/ScannerContext.java    |   4 +
 .../hbase/regionserver/SegmentScanner.java    |  46 +++++++
 .../hbase/regionserver/StoreFileScanner.java  |  52 ++++++++
 .../hbase/regionserver/StoreScanner.java      |  13 ++
 .../querymatcher/ScanQueryMatcher.java        |   4 +
 .../querymatcher/UserScanQueryMatcher.java    |  17 ++-
 11 files changed, 385 insertions(+), 7 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index ebc456413e..7fcc2debf6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -40,12 +40,15 @@ import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 import org.apache.hadoop.hbase.io.ByteBuffInputStream;
 import org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.EncodingState;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+import org.apache.hadoop.hbase.ipc.ServerCall;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
@@ -1510,10 +1513,34 @@ public class HFileBlock implements Cacheable {
       if (offset >= endOffset) {
         return null;
       }
+      long start = 0, end;
+      if (ServerCall.isTracing()) {
+        start = System.nanoTime();
+      }
       HFileBlock b = readBlockData(offset, length, false, false);
       offset += b.getOnDiskSizeWithHeader();
       length = b.getNextBlockOnDiskSize();
-      return b.unpack(fileContext, owner);
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("block_read_ns", end - start);
+        ServerCall.updateCurrentCallMetric("block_reads", 1);
+        start = System.nanoTime();
+      }
+      b = b.unpack(fileContext, owner);
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("block_unpack_ns", end - start);
+        ServerCall.updateCurrentCallMetric("block_unpacks", 1);
+        if (b.getHFileContext().getEncryptionContext() != Encryption.Context.NONE) {
+          ServerCall.updateCurrentCallMetric("block_decrypt_" +
+            b.getHFileContext().getEncryptionContext().getCipher().getName().toLowerCase(), 1);
+        }
+        if (b.getHFileContext().getCompression() != Compression.Algorithm.NONE) {
+          ServerCall.updateCurrentCallMetric("block_decompress_" +
+            b.getHFileContext().getCompression().getName().toLowerCase(), 1);
+        }
+      }
+      return b;
     }

     @Override
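
The guarded-timer idiom introduced above recurs at every call site this patch
instruments. A minimal self-contained sketch of the idiom, assuming a
hypothetical timedScanStep()/doWork() pair and example_* metric names (none of
these names appear in the patch):

  static void timedScanStep() {
    long start = 0, end;
    if (ServerCall.isTracing()) { // delegates to LOG.isTraceEnabled(); cheap when tracing is off
      start = System.nanoTime();
    }
    try {
      doWork(); // stands in for the measured operation: read, unpack, seek, ...
    } finally {
      if (start > 0) { // metric bookkeeping only runs when the timer was started
        end = System.nanoTime();
        ServerCall.updateCurrentCallMetric("example_op_ns", end - start);
        ServerCall.updateCurrentCallMetric("example_ops", 1);
      }
    }
  }

  static void doWork() { /* the operation being timed */ }

When the ServerCall logger is below TRACE, the only per-operation cost is one
isTracing() check and a local assignment.
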
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index d1b3a89f1e..bcef691422 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
+import org.apache.hadoop.hbase.ipc.ServerCall;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
@@ -718,6 +719,13 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * key)
    */
   protected int blockSeek(Cell key, boolean seekBefore) {
+    long start = 0, end;
+    if (ServerCall.isTracing()) {
+      start = System.nanoTime();
+    }
+
+    try {
+
     int klen, vlen, tlen = 0;
     int lastKeyValueSize = -1;
     int offsetFromPos;
@@ -737,6 +745,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       offsetFromPos += Bytes.SIZEOF_LONG;
       blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair);
       bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen);
+      if (ServerCall.isTracing()) {
+        ServerCall.updateCurrentCallMetric("block_seek_keys", 1);
+      }
       int comp =
           PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv);
       offsetFromPos += klen + vlen;
@@ -799,6 +810,14 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     blockBuffer.moveBack(lastKeyValueSize);
     readKeyValueLen();
     return 1; // didn't exactly find it.
+
+    } finally {
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("block_seeks", 1);
+        ServerCall.updateCurrentCallMetric("block_seek_ns", end - start);
+      }
+    }
   }

   @Override
@@ -1325,15 +1344,41 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
   private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock,
       boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType,
       DataBlockEncoding expectedDataBlockEncoding) throws IOException {
+    long start = 0, end;
+    if (ServerCall.isTracing()) {
+      start = System.nanoTime();
+    }
     // Check cache for block. If found return.
     BlockCache cache = cacheConf.getBlockCache().orElse(null);
     if (cache != null) {
       HFileBlock cachedBlock =
           (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, updateCacheMetrics);
       if (cachedBlock != null) {
+        if (start > 0) {
+          end = System.nanoTime();
+          ServerCall.updateCurrentCallMetric("cached_block_read_ns", end - start);
+          ServerCall.updateCurrentCallMetric("cached_block_reads", 1);
+        }
         if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
           HFileBlock compressedBlock = cachedBlock;
+          if (ServerCall.isTracing()) {
+            start = System.nanoTime();
+          }
           cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
+          if (start > 0) {
+            end = System.nanoTime();
+            ServerCall.updateCurrentCallMetric("cached_block_unpack_ns", end - start);
+            ServerCall.updateCurrentCallMetric("cached_block_unpacks", 1);
+            if (hfileContext.getEncryptionContext() != Encryption.Context.NONE) {
+              ServerCall.updateCurrentCallMetric("block_decrypt_" +
+                hfileContext.getEncryptionContext().getCipher().getName().toLowerCase(), 1);
+            }
+            if (hfileContext.getCompression() != Compression.Algorithm.NONE) {
+              ServerCall.updateCurrentCallMetric("cached_block_decompress_" +
+                hfileContext.getCompression().getName().toLowerCase(), 1);
+            }
+          }
+
           // In case of compressed block after unpacking we can return the compressed block
           if (compressedBlock != cachedBlock) {
             cache.returnBlock(cacheKey, compressedBlock);
@@ -1438,13 +1483,36 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       }

       // Cache Miss, please load.
-      HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false).
-          unpack(hfileContext, fsBlockReader);
+      long start = 0, end;
+      if (ServerCall.isTracing()) {
+        start = System.nanoTime();
+      }
+      HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false);
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("block_read_ns", end - start);
+        ServerCall.updateCurrentCallMetric("block_reads", 1);
+        start = System.nanoTime();
+      }
+      HFileBlock unpackedBlock = metaBlock.unpack(hfileContext, fsBlockReader);
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("block_unpack_ns", end - start);
+        ServerCall.updateCurrentCallMetric("block_unpacks", 1);
+        if (hfileContext.getEncryptionContext() != Encryption.Context.NONE) {
+          ServerCall.updateCurrentCallMetric("block_decrypt_" +
+            hfileContext.getEncryptionContext().getCipher().getName().toLowerCase(), 1);
+        }
+        if (hfileContext.getCompression() != Compression.Algorithm.NONE) {
+          ServerCall.updateCurrentCallMetric("block_decompress_" +
+            hfileContext.getCompression().getName().toLowerCase(), 1);
+        }
+      }

       // Cache the block
       if (cacheBlock) {
         cacheConf.getBlockCache()
-            .ifPresent(cache -> cache.cacheBlock(cacheKey, metaBlock, cacheConf.isInMemory()));
+            .ifPresent(cache -> cache.cacheBlock(cacheKey, unpackedBlock, cacheConf.isInMemory()));
       }
-      return metaBlock;
+      return unpackedBlock;
     }
@@ -1524,10 +1592,33 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
         TraceUtil.addTimelineAnnotation("blockCacheMiss");

         // Load block from filesystem.
+        long start = 0, end;
+        if (ServerCall.isTracing()) {
+          start = System.nanoTime();
+        }
         HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread,
           !isCompaction);
         validateBlockType(hfileBlock, expectedBlockType);
+        if (start > 0) {
+          end = System.nanoTime();
+          ServerCall.updateCurrentCallMetric("block_read_ns", end - start);
+          ServerCall.updateCurrentCallMetric("block_reads", 1);
+          start = System.nanoTime();
+        }
         HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
+        if (start > 0) {
+          end = System.nanoTime();
+          ServerCall.updateCurrentCallMetric("block_unpack_ns", end - start);
+          ServerCall.updateCurrentCallMetric("block_unpacks", 1);
+          if (hfileContext.getEncryptionContext() != Encryption.Context.NONE) {
+            ServerCall.updateCurrentCallMetric("block_decrypt_" +
+              hfileContext.getEncryptionContext().getCipher().getName().toLowerCase(), 1);
+          }
+          if (hfileContext.getCompression() != Compression.Algorithm.NONE) {
+            ServerCall.updateCurrentCallMetric("block_decompress_" +
+              hfileContext.getCompression().getName().toLowerCase(), 1);
+          }
+        }
         BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();

         // Cache the block if necessary
@@ -1706,11 +1797,21 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     @Override
     protected boolean processFirstDataBlock() throws IOException {
       seeker.rewind();
+      if (ServerCall.isTracing()) {
+        ServerCall.updateCurrentCallMetric("seeker_rewind", 1);
+      }
       return true;
     }

     @Override
     public boolean next() throws IOException {
+      long start = 0, end;
+      if (ServerCall.isTracing()) {
+        start = System.nanoTime();
+      }
+
+      try {
+
       boolean isValid = seeker.next();
       if (!isValid) {
         HFileBlock newBlock = readNextDataBlock();
@@ -1722,6 +1823,16 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
         }
       }
       return isValid;
+
+      } finally {
+        if (start > 0) {
+          end = System.nanoTime();
+          ServerCall.updateCurrentCallMetric("seeker_" +
+            getEffectiveDataBlockEncoding().name().toLowerCase() + "_next_ns", end - start);
+          ServerCall.updateCurrentCallMetric("seeker_" +
+            getEffectiveDataBlockEncoding().name().toLowerCase() + "_next", 1);
+        }
+      }
     }

     @Override
@@ -1774,6 +1885,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
         updateCurrentBlock(seekToBlock);
       } else if (rewind) {
         seeker.rewind();
+        if (ServerCall.isTracing()) {
+          ServerCall.updateCurrentCallMetric("seeker_rewind", 1);
+        }
       }
       this.nextIndexedKey = nextIndexedKey;
       return seeker.seekToKeyInBlock(key, seekBefore);
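
Each *_ns timer above is paired with an event counter sharing the same stem
(block_read_ns/block_reads, block_seek_ns/block_seeks, ...), so per-call
averages can be derived after the fact. A hedged post-processing sketch; the
local variable names are illustrative:

  Map<String, Long> m = ServerCall.getCurrentCallMetrics();
  long reads = m.getOrDefault("block_reads", 0L);
  long avgBlockReadNs = reads > 0 ? m.getOrDefault("block_read_ns", 0L) / reads : 0;
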
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index 48ee664693..ed8e092fb6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -191,6 +191,10 @@ public class CallRunner {
       if (!sucessful) {
         this.rpcServer.addCallSize(call.getSize() * -1);
       }
+      // Call metrics trace
+      if (ServerCall.isTracing()) {
+        ServerCall.logCallTrace(call, sucessful);
+      }
       cleanup();
     }
   }
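
Both the per-site counters and the trace line emitted here are gated on the
org.apache.hadoop.hbase.ipc.ServerCall logger being at TRACE level. With a
Log4j 1.x style properties file, as HBase 2.x branches ship (an assumption;
adjust for whatever logging backend is actually deployed):

  log4j.logger.org.apache.hadoop.hbase.ipc.ServerCall=TRACE
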
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
index 7571ac1539..5ffe268aa7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
@@ -23,6 +23,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;

 import java.io.IOException;
+import java.util.Map;

 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -132,4 +133,8 @@ public interface RpcCall extends RpcCallContext {
    * @return A short string format of this call without possibly lengthy params
    */
   String toShortString();
+
+  Map<String, Long> getCallMetrics();
+  void setCallMetric(String name, long value);
+  long updateCallMetric(String name, long delta);
 }
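
An illustrative use of the three new RpcCall methods from server-side code
that already holds a call reference (the method and metric names here are
hypothetical, not part of the patch; java.util.Map import assumed):

  void recordScanWork(RpcCall call, int rowsInBatch) {
    call.updateCallMetric("rows_scanned", rowsInBatch); // accumulates; counter created on first use
    call.setCallMetric("last_batch_size", rowsInBatch); // overwrite-style metric
    Map<String, Long> snapshot = call.getCallMetrics(); // sorted point-in-time copy
  }
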
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
index 881828bf97..640c544bc6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
@@ -21,12 +21,19 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
+import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
 import org.apache.hadoop.hbase.io.ByteBufferListOutputStream;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
@@ -91,12 +98,24 @@ public abstract class ServerCall<T extends ServerRpcConnection> implements RpcCa
   private long exceptionSize = 0;
   private final boolean retryImmediatelySupported;

+  // Call metrics, counters for detailed trace of internal actions and decisions.
+  // Access is expected to be single threaded. Synchronization is mainly to avoid
+  // concurrent access exceptions if this invariant is violated by mistake; it
+  // should always be uncontended.
+  private final Map<String, AtomicLong> callCounters = new HashMap<>();
+
   // This is a dirty hack to address HBASE-22539. The lowest bit is for normal rpc cleanup, and the
   // second bit is for WAL reference. We can only call release if both of them are zero. The reason
   // why we can not use a general reference counting is that, we may call cleanup multiple times in
   // the current implementation. We should fix this in the future.
   private final AtomicInteger reference = new AtomicInteger(0b01);

+  static final Logger LOG = LoggerFactory.getLogger(ServerCall.class);
+
+  public static boolean isTracing() {
+    return LOG.isTraceEnabled();
+  }
+
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH",
       justification = "Can't figure why this complaint is happening... see below")
   ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
@@ -542,4 +561,81 @@ public abstract class ServerCall<T extends ServerRpcConnection> implements RpcCa
   public synchronized BufferChain getResponse() {
     return response;
   }
+
+  @Override
+  public void setCallMetric(String name, long value) {
+    AtomicLong counter;
+    synchronized (callCounters) { // Expected to always be the uncontended case
+      counter = callCounters.get(name);
+      if (counter == null) {
+        counter = new AtomicLong();
+        callCounters.put(name, counter);
+      }
+    }
+    counter.set(value);
+  }
+
+  @Override
+  public long updateCallMetric(String name, long delta) {
+    AtomicLong counter;
+    synchronized (callCounters) { // Expected to always be the uncontended case
+      counter = callCounters.get(name);
+      if (counter == null) {
+        counter = new AtomicLong();
+        callCounters.put(name, counter);
+      }
+    }
+    return counter.addAndGet(delta);
+  }
+
+  @Override
+  public Map<String, Long> getCallMetrics() {
+    Map<String, Long> map = new TreeMap<>(); // TreeMap so enumeration will be sorted
+    synchronized (callCounters) { // Expected to always be the uncontended case
+      for (Map.Entry<String, AtomicLong> e: callCounters.entrySet()) {
+        map.put(e.getKey(), e.getValue().longValue());
+      }
+    }
+    return map;
+  }
+
+  public static Map<String, Long> getCurrentCallMetrics() {
+    Optional<RpcCall> call = RpcServer.getCurrentCall();
+    if (call.isPresent()) {
+      return call.get().getCallMetrics();
+    }
+    return new HashMap<>();
+  }
+
+  public static void setCurrentCallMetric(String name, long value) {
+    Optional<RpcCall> call = RpcServer.getCurrentCall();
+    if (call.isPresent()) {
+      call.get().setCallMetric(name, value);
+    }
+  }
+
+  public static long updateCurrentCallMetric(String name, long delta) {
+    Optional<RpcCall> call = RpcServer.getCurrentCall();
+    if (call.isPresent()) {
+      return call.get().updateCallMetric(name, delta);
+    }
+    return 0;
+  }
+
+  public static void logCallTrace(RpcCall call, boolean successful) {
+    StringBuilder sb = new StringBuilder();
+    sb.append(call.toShortString());
+    sb.append(" successful: "); sb.append(successful);
+    Map<String, Long> metrics = call.getCallMetrics();
+    if (!metrics.isEmpty()) {
+      sb.append(" metrics: [");
+      for (Map.Entry<String, Long> e: metrics.entrySet()) {
+        sb.append(" \""); sb.append(e.getKey()); sb.append("\": ");
+        sb.append(e.getValue());
+      }
+      sb.append(" ]");
+    }
+    LOG.trace(sb.toString());
+  }
+
 }
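
With tracing enabled, logCallTrace() produces one line per finished call
shaped like the following. The values are hypothetical; the prefix comes from
RpcCall.toShortString(), and the keys sort alphabetically because
getCallMetrics() copies the counters into a TreeMap:

  callId: 4711 service: ClientService methodName: Scan size: 219 connection: 10.0.0.21:43210 successful: true metrics: [ "block_read_ns": 1882004 "block_reads": 3 "block_seek_ns": 95211 "block_seeks": 2 "seeks": 1 ]
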
see below") ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, @@ -542,4 +561,81 @@ public abstract class ServerCall implements RpcCa public synchronized BufferChain getResponse() { return response; } + + @Override + public void setCallMetric(String name, long value) { + AtomicLong counter; + synchronized (callCounters) { // Expected to always be the uncontended case + counter = callCounters.get(name); + if (counter == null) { + counter = new AtomicLong(); + callCounters.put(name, counter); + } + } + counter.set(value); + } + + @Override + public long updateCallMetric(String name, long delta) { + AtomicLong counter; + synchronized (callCounters) { // Expected to always be the uncontended case + counter = callCounters.get(name); + if (counter == null) { + counter = new AtomicLong(); + callCounters.put(name, counter); + } + } + return counter.addAndGet(delta); + } + + @Override + public Map getCallMetrics() { + Map map = new TreeMap<>(); // TreeMap so enumeration will be sorted + synchronized (callCounters) { // Expected to always be the uncontended case + for (Map.Entry e: callCounters.entrySet()) { + map.put(e.getKey(), e.getValue().longValue()); + } + } + return map; + } + + public static Map getCurrentCallMetrics() { + Optional call = RpcServer.getCurrentCall(); + if (call.isPresent()) { + return call.get().getCallMetrics(); + } + return new HashMap<>(); + } + + public static void setCurrentCallMetric(String name, long delta) { + Optional call = RpcServer.getCurrentCall(); + if (call.isPresent()) { + call.get().setCallMetric(name, delta); + } + } + + public static long updateCurrentCallMetric(String name, long delta) { + Optional call = RpcServer.getCurrentCall(); + if (call.isPresent()) { + return call.get().updateCallMetric(name, delta); + } + return 0; + } + + public static void logCallTrace(RpcCall call, boolean sucessful) { + StringBuffer sb = new StringBuffer(); + sb.append(call.toShortString()); + sb.append(" successful: "); sb.append(sucessful); + Map metrics = call.getCallMetrics(); + if (!metrics.isEmpty()) { + sb.append(" metrics: ["); + for (Map.Entry e: metrics.entrySet()) { + sb.append(" \""); sb.append(e.getKey()); sb.append("\": "); + sb.append(e.getValue()); + } + sb.append(" ]"); + } + LOG.trace(sb.toString()); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java index 4ba69130f1..3d401b774c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; +import org.apache.hadoop.hbase.ipc.ServerCall; /** * ScannerContext instances encapsulate limit tracking AND progress towards those limits during @@ -247,8 +248,10 @@ public class ScannerContext { if (!NextState.isValidState(state)) { throw new IllegalArgumentException("Cannot set to invalid state: " + state); } - this.scannerState = state; + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("scanner_state_" + state.name().toLowerCase(), 1); + } return state; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
index ec1ec0b472..4351533cfb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.ipc.ServerCall;

 /**
  * A scanner of a single memstore segment.
@@ -97,12 +98,27 @@ public class SegmentScanner implements KeyValueScanner {
    */
   @Override
   public Cell next() throws IOException {
+    long start = 0, end;
+    if (ServerCall.isTracing()) {
+      start = System.nanoTime();
+    }
+
+    try {
+
     if (closed) {
       return null;
     }
     Cell oldCurrent = current;
     updateCurrent(); // update the currently observed Cell
     return oldCurrent;
+
+    } finally {
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("memstore_next_ns", end - start);
+        ServerCall.updateCurrentCallMetric("memstore_next", 1);
+      }
+    }
   }

   /**
@@ -112,6 +128,13 @@ public class SegmentScanner implements KeyValueScanner {
    */
   @Override
   public boolean seek(Cell cell) throws IOException {
+    long start = 0, end;
+    if (ServerCall.isTracing()) {
+      start = System.nanoTime();
+    }
+
+    try {
+
     if (closed) {
       return false;
     }
@@ -125,6 +148,14 @@
     last = null;
     updateCurrent();
     return (current != null);
+
+    } finally {
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("memstore_seek_ns", end - start);
+        ServerCall.updateCurrentCallMetric("memstore_seek", 1);
+      }
+    }
   }

   protected Iterator<Cell> getIterator(Cell cell) {
@@ -142,6 +173,13 @@
    */
   @Override
   public boolean reseek(Cell cell) throws IOException {
+    long start = 0, end;
+    if (ServerCall.isTracing()) {
+      start = System.nanoTime();
+    }
+
+    try {
+
     if (closed) {
       return false;
     }
@@ -156,6 +194,14 @@
     iter = getIterator(getHighest(cell, last));
     updateCurrent();
     return (current != null);
+
+    } finally {
+      if (start > 0) {
+        end = System.nanoTime();
+        ServerCall.updateCurrentCallMetric("memstore_reseek_ns", end - start);
+        ServerCall.updateCurrentCallMetric("memstore_reseek", 1);
+      }
+    }
   }

   /**
IOException("Could not iterate " + this, e); } return retKey; + + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("store_next", 1); + ServerCall.updateCurrentCallMetric("store_next_" + hfs.getReader().getName(), 1); + ServerCall.updateCurrentCallMetric("store_next_ns", end - start); + ServerCall.updateCurrentCallMetric("store_next_ns_" + hfs.getReader().getName(), end - start); + } + } } @Override public boolean seek(Cell key) throws IOException { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + if (seekCount != null) seekCount.increment(); try { @@ -245,10 +270,27 @@ public class StoreFileScanner implements KeyValueScanner { } catch (IOException ioe) { throw new IOException("Could not seek " + this + " to key " + key, ioe); } + + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("store_seek", 1); + ServerCall.updateCurrentCallMetric("store_seek_" + hfs.getReader().getName(), 1); + ServerCall.updateCurrentCallMetric("store_seek_ns", end - start); + ServerCall.updateCurrentCallMetric("store_seek_ns_" + hfs.getReader().getName(), end - start); + } + } } @Override public boolean reseek(Cell key) throws IOException { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + if (seekCount != null) seekCount.increment(); try { @@ -273,6 +315,16 @@ public class StoreFileScanner implements KeyValueScanner { throw new IOException("Could not reseek " + this + " to key " + key, ioe); } + + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("store_reseek", 1); + ServerCall.updateCurrentCallMetric("store_reseek_" + hfs.getReader().getName(), 1); + ServerCall.updateCurrentCallMetric("store_reseek_ns", end - start); + ServerCall.updateCurrentCallMetric("store_reseek_ns_" + hfs.getReader().getName(), end - start); + } + } } protected void setCurrentCell(Cell newVal) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 3ad3be8614..8f0021f218 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler; @@ -536,6 +537,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner if (checkFlushed()) { reopenAfterFlush(); } + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("seeks", 1); + } return this.heap.seek(key); } @@ -624,6 +628,12 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner scannerContext.setLastPeekedCell(cell); topChanged = false; ScanQueryMatcher.MatchCode qcode = matcher.match(cell); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("sqm_hint_" + qcode.toString().toLowerCase(), 1); + ServerCall.updateCurrentCallMetric("cells_matched", 1); + 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 3ad3be8614..8f0021f218 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.ipc.ServerCall;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler;
@@ -536,6 +537,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     if (checkFlushed()) {
       reopenAfterFlush();
     }
+    if (ServerCall.isTracing()) {
+      ServerCall.updateCurrentCallMetric("seeks", 1);
+    }
     return this.heap.seek(key);
   }

@@ -624,6 +628,12 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
       scannerContext.setLastPeekedCell(cell);
       topChanged = false;
       ScanQueryMatcher.MatchCode qcode = matcher.match(cell);
+      if (ServerCall.isTracing()) {
+        ServerCall.updateCurrentCallMetric("sqm_hint_" + qcode.toString().toLowerCase(), 1);
+        ServerCall.updateCurrentCallMetric("cells_matched", 1);
+        ServerCall.updateCurrentCallMetric("cells_matched__" + this.store.getRegionInfo().getRegionNameAsString() + "__" +
+          this.store.getColumnFamilyName(), 1);
+      }
       switch (qcode) {
         case INCLUDE:
         case INCLUDE_AND_SEEK_NEXT_ROW:
@@ -1088,6 +1098,9 @@
     if (checkFlushed()) {
       reopenAfterFlush();
     }
+    if (ServerCall.isTracing()) {
+      ServerCall.updateCurrentCallMetric("reseeks", 1);
+    }
     if (explicitColumnQuery && lazySeekEnabledGlobally) {
       return heap.requestSeek(kv, true, useRowColBloom);
     }
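
Note that the cells_matched__<region>__<cf> counter embeds the full region
name and column family between double-underscore delimiters, so a multi-region
scan can contribute several such keys to one call's metric map, for example
(region name hypothetical):

  cells_matched__usertable,,1593216000000.8f21a6ee2a6347c78e0ab7786ee36f7f.__cf1
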
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
index 96d3bab614..31a6f7e0a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.ipc.ServerCall;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ShipperListener;
@@ -207,6 +208,9 @@ public abstract class ScanQueryMatcher implements ShipperListener {
     }
     // MvccSensitiveTracker always need check all cells to save some infos.
     DeleteResult deleteResult = deletes.isDeleted(cell);
+    if (ServerCall.isTracing()) {
+      ServerCall.updateCurrentCallMetric("delete_hint_" + deleteResult.toString().toLowerCase(), 1);
+    }
     switch (deleteResult) {
       case FAMILY_DELETED:
       case COLUMN_DELETED:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
index cc994466b3..83e352f9a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.ipc.ServerCall;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.util.Pair;
@@ -132,6 +133,9 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
     }
     // STEP 1: Check if the column is part of the requested columns
     MatchCode matchCode = columns.checkColumn(cell, typeByte);
+    if (ServerCall.isTracing()) {
+      ServerCall.updateCurrentCallMetric("column_hint_" + matchCode.toString().toLowerCase(), 1);
+    }
     if (matchCode != MatchCode.INCLUDE) {
       return matchCode;
     }
@@ -140,6 +144,9 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
      * INCLUDE, INCLUDE_AND_SEEK_NEXT_COL, or INCLUDE_AND_SEEK_NEXT_ROW.
      */
     matchCode = columns.checkVersions(cell, timestamp, typeByte, false);
+    if (ServerCall.isTracing()) {
+      ServerCall.updateCurrentCallMetric("versions_hint_" + matchCode.toString().toLowerCase(), 1);
+    }
     switch (matchCode) {
       case SKIP:
         return MatchCode.SKIP;
@@ -152,8 +159,14 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
         break;
     }

-    return filter == null ? matchCode : mergeFilterResponse(cell, matchCode,
-      filter.filterCell(cell));
+    if (filter != null) {
+      ReturnCode filterResponse = filter.filterCell(cell);
+      if (ServerCall.isTracing()) {
+        ServerCall.updateCurrentCallMetric("filter_hint_" + filterResponse.toString().toLowerCase(), 1);
+      }
+      return mergeFilterResponse(cell, matchCode, filterResponse);
+    }
+    return matchCode;
   }

   /**
-- 
2.26.2