From d78d1fe45f199645aaa5b9300191f37cb205f6af Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Fri, 26 Jun 2020 17:11:23 -0700 Subject: [PATCH] W-7665966 Instrument low level scan details Reason: Debugging Test Plan: Unit tests, manual verification --- .../hadoop/hbase/io/hfile/HFileBlock.java | 31 ++++- .../hadoop/hbase/io/hfile/HFileReaderV2.java | 119 +++++++++++++++++- .../hadoop/hbase/io/hfile/HFileReaderV3.java | 19 +++ .../apache/hadoop/hbase/ipc/CallRunner.java | 15 +-- .../hadoop/hbase/ipc/RpcCallContext.java | 5 + .../apache/hadoop/hbase/ipc/RpcServer.java | 45 +++++++ .../apache/hadoop/hbase/ipc/ServerCall.java | 75 +++++++++++ .../hbase/regionserver/DefaultMemStore.java | 47 ++++++- .../hbase/regionserver/ScannerContext.java | 5 +- .../hbase/regionserver/StoreFileScanner.java | 57 ++++++++- .../hbase/regionserver/StoreScanner.java | 17 ++- .../querymatcher/LegacyScanQueryMatcher.java | 13 ++ .../querymatcher/ScanQueryMatcher.java | 4 + .../querymatcher/UserScanQueryMatcher.java | 10 ++ 14 files changed, 447 insertions(+), 15 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 114d64250f..1cd42365a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -39,11 +39,14 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.io.ByteBufferInputStream; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; @@ -1429,10 +1432,36 @@ public class HFileBlock implements Cacheable { if (offset >= endOffset) { return null; } + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } HFileBlock b = readBlockData(offset, length, false, false); + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_read_ns", end - start); + ServerCall.updateCurrentCallMetric("block_reads", 1); + } offset += b.getOnDiskSizeWithHeader(); length = b.getNextBlockOnDiskSize(); - return b.unpack(fileContext, owner); + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + b = b.unpack(fileContext, owner); + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_unpack_ns", end - start); + ServerCall.updateCurrentCallMetric("block_unpacks", 1); + if (b.getHFileContext().getEncryptionContext() != Encryption.Context.NONE) { + ServerCall.updateCurrentCallMetric("block_decrypt_" + + b.getHFileContext().getEncryptionContext().getCipher().getName().toLowerCase(), 1); + } + if (b.getHFileContext().getCompression() != Compression.Algorithm.NONE) { 
+ ServerCall.updateCurrentCallMetric("block_decompress_" + + b.getHFileContext().getCompression().getName().toLowerCase(), 1); + } + } + return b; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index 557fb3c65c..f261ef8b4d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -36,10 +36,13 @@ import org.apache.hadoop.hbase.NoTagsKeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IdLock; @@ -275,12 +278,37 @@ public class HFileReaderV2 extends AbstractHFileReader { DataBlockEncoding expectedDataBlockEncoding) throws IOException { // Check cache for block. If found return. if (cacheConf.isBlockCacheEnabled()) { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } BlockCache cache = cacheConf.getBlockCache(); HFileBlock cachedBlock = (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, updateCacheMetrics); if (cachedBlock != null) { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("cached_block_read_ns", end - start); + ServerCall.updateCurrentCallMetric("cached_block_reads", 1); + } if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) { + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } cachedBlock = cachedBlock.unpack(hfileContext, fsBlockReader); + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("cached_block_unpack_ns", end - start); + ServerCall.updateCurrentCallMetric("cached_block_unpacks", 1); + if (hfileContext.getEncryptionContext() != Encryption.Context.NONE) { + ServerCall.updateCurrentCallMetric("cached_block_decrypt_" + + hfileContext.getEncryptionContext().getCipher().getName().toLowerCase(), 1); + } + if (hfileContext.getCompression() != Compression.Algorithm.NONE) { + ServerCall.updateCurrentCallMetric("cached_block_decompress_" + + hfileContext.getCompression().getName().toLowerCase(), 1); + } + } } validateBlockType(cachedBlock, expectedBlockType); @@ -363,6 +391,9 @@ public class HFileReaderV2 extends AbstractHFileReader { BlockType.META, null); if (cachedBlock != null) { assert cachedBlock.isUnpacked() : "Packed block leak."; + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("cached_block_reads", 1); + } // Return a distinct 'shallow copy' of the block, // so pos does not get messed by the scanner return cachedBlock.getBufferWithoutHeader(); @@ -370,9 +401,32 @@ public class HFileReaderV2 extends AbstractHFileReader { // Cache Miss, please load. 
} + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, - blockSize, true, false).unpack(hfileContext, fsBlockReader); - + blockSize, true, false); + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_read_ns", end - start); + ServerCall.updateCurrentCallMetric("block_reads", 1); + start = System.nanoTime(); + } + metaBlock = metaBlock.unpack(hfileContext, fsBlockReader); + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_unpack_ns", end - start); + ServerCall.updateCurrentCallMetric("block_unpacks", 1); + if (hfileContext.getEncryptionContext() != Encryption.Context.NONE) { + ServerCall.updateCurrentCallMetric("block_decrypt_" + + hfileContext.getEncryptionContext().getCipher().getName().toLowerCase(), 1); + } + if (hfileContext.getCompression() != Compression.Algorithm.NONE) { + ServerCall.updateCurrentCallMetric("block_decompress_" + + hfileContext.getCompression().getName().toLowerCase(), 1); + } + } // Cache the block if (cacheBlock) { cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock, @@ -451,10 +505,33 @@ public class HFileReaderV2 extends AbstractHFileReader { traceScope.getSpan().addTimelineAnnotation("blockCacheMiss"); } // Load block from filesystem. + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, !isCompaction); validateBlockType(hfileBlock, expectedBlockType); + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_read_ns", end - start); + ServerCall.updateCurrentCallMetric("block_reads", 1); + start = System.nanoTime(); + } HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader); + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_unpack_ns", end - start); + ServerCall.updateCurrentCallMetric("block_unpacks", 1); + if (hfileContext.getEncryptionContext() != Encryption.Context.NONE) { + ServerCall.updateCurrentCallMetric("block_decrypt_" + + hfileContext.getEncryptionContext().getCipher().getName().toLowerCase(), 1); + } + if (hfileContext.getCompression() != Compression.Algorithm.NONE) { + ServerCall.updateCurrentCallMetric("block_decompress_" + + hfileContext.getCompression().getName().toLowerCase(), 1); + } + } BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory(); // Cache the block if necessary @@ -1081,6 +1158,13 @@ public class HFileReaderV2 extends AbstractHFileReader { long memstoreTS = 0; int memstoreTSLen = 0; int lastKeyValueSize = -1; + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + KeyValue.KeyOnlyKeyValue keyOnlykv = new KeyValue.KeyOnlyKeyValue(); do { blockBuffer.mark(); @@ -1107,6 +1191,9 @@ public class HFileReaderV2 extends AbstractHFileReader { int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE; keyOnlykv.setKey(blockBuffer.array(), keyOffset, klen); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("block_seek_keys", 1); + } int comp = reader.getComparator().compareOnlyKeyPortion(key, keyOnlykv); if (comp == 0) { @@ -1151,6 +1238,14 @@ public class HFileReaderV2 extends AbstractHFileReader { blockBuffer.position(blockBuffer.position() - lastKeyValueSize); readKeyValueLen(); return 1; // didn't exactly find it. 
+ + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_seek_ns", end - start); + ServerCall.updateCurrentCallMetric("block_seeks", 1); + } + } } @Override @@ -1269,6 +1364,9 @@ public class HFileReaderV2 extends AbstractHFileReader { reader.getTrailer().getFirstDataBlockOffset(); if (block != null && block.getOffset() == firstDataBlockOffset) { seeker.rewind(); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("seeker_rewind", 1); + } return true; } @@ -1283,6 +1381,11 @@ public class HFileReaderV2 extends AbstractHFileReader { @Override public boolean next() throws IOException { + long start = 0, end = 0; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + try { boolean isValid = seeker.next(); if (!isValid) { block = readNextDataBlock(); @@ -1292,6 +1395,15 @@ public class HFileReaderV2 extends AbstractHFileReader { } } return isValid; + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("seeker_" + + getEffectiveDataBlockEncoding().name().toLowerCase() + "_next_ns", end - start); + ServerCall.updateCurrentCallMetric("seeker_" + + getEffectiveDataBlockEncoding().name().toLowerCase() + "_next", 1); + } + } } @Override @@ -1351,6 +1463,9 @@ public class HFileReaderV2 extends AbstractHFileReader { updateCurrentBlock(seekToBlock); } else if (rewind) { seeker.rewind(); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("seeker_rewind", 1); + } } this.nextIndexedKey = nextIndexedKey; return seeker.seekToKeyInBlock(key, seekBefore); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java index dcea00786c..4532110f8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteBufferUtils; @@ -272,6 +273,13 @@ public class HFileReaderV3 extends HFileReaderV2 { */ @Override protected int blockSeek(Cell key, boolean seekBefore) { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + int klen, vlen, tlen = 0; long memstoreTS = 0; int memstoreTSLen = 0; @@ -317,6 +325,9 @@ public class HFileReaderV3 extends HFileReaderV2 { int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position() + (Bytes.SIZEOF_INT * 2); keyOnlyKv.setKey(blockBuffer.array(), keyOffset, klen); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("block_seek_keys", 1); + } int comp = reader.getComparator().compareOnlyKeyPortion(key, keyOnlyKv); if (comp == 0) { @@ -365,6 +376,14 @@ public class HFileReaderV3 extends HFileReaderV2 { blockBuffer.position(blockBuffer.position() - lastKeyValueSize); readKeyValueLen(); return 1; // didn't exactly find it. 
+ + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("block_seek_ns", end - start); + ServerCall.updateCurrentCallMetric("block_seeks", 1); + } + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 1a9d8b236d..3a27c95952 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetSocketAddress; import java.nio.channels.ClosedChannelException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -45,7 +43,6 @@ import com.google.protobuf.Message; @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving public class CallRunner { - private static final Log LOG = LogFactory.getLog(CallRunner.class); private static final CallDroppedException CALL_DROPPED_EXCEPTION = new CallDroppedException(); @@ -53,7 +50,7 @@ public class CallRunner { private Call call; private RpcServerInterface rpcServer; private MonitoredRPCHandler status; - private volatile boolean sucessful; + private volatile boolean successful; /** * On construction, adds the size of this call to the running count of outstanding call sizes. @@ -140,7 +137,7 @@ public class CallRunner { RpcServer.CurCall.set(null); if (resultPair != null) { this.rpcServer.addCallSize(call.getSize() * -1); - sucessful = true; + successful = true; } } // Set the response @@ -170,9 +167,13 @@ public class CallRunner { RpcServer.LOG.warn(Thread.currentThread().getName() + ": caught: " + StringUtils.stringifyException(e)); } finally { - if (!sucessful) { + if (!successful) { this.rpcServer.addCallSize(call.getSize() * -1); } + // Call metrics trace + if (ServerCall.isTracing()) { + ServerCall.logCallTrace(call, successful); + } cleanup(); } } @@ -204,7 +205,7 @@ public class CallRunner { RpcServer.LOG.warn(Thread.currentThread().getName() + ": caught: " + StringUtils.stringifyException(e)); } finally { - if (!sucessful) { + if (!successful) { this.rpcServer.addCallSize(call.getSize() * -1); } cleanup(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 8e329b5cb8..31888047c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetAddress; +import java.util.Map; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo; @@ -95,4 +96,8 @@ public interface RpcCallContext { * @return The system timestamp of deadline. 
*/ long getDeadline(); + + Map<String, Long> getCallMetrics(); + void setCallMetric(String name, long value); + long updateCallMetric(String name, long delta); }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 30745cc07a..8921e27085 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -63,10 +63,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -352,6 +354,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private long exceptionSize = 0;
     private boolean retryImmediatelySupported;
 
+    // Call metrics, counters for detailed trace of internal actions and decisions
+    // Access is expected to be single threaded. Synchronization is mainly to avoid
+    // concurrent access exceptions if this invariant is violated by mistake; should
+    // always be uncontended.
+    private Map<String, AtomicLong> callCounters = new HashMap<>();
+
     @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
         justification="Can't figure why this complaint is happening... see below")
     Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header,
@@ -647,6 +655,43 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     public boolean isRetryImmediatelySupported() {
       return retryImmediatelySupported;
     }
+
+    @Override
+    public void setCallMetric(String name, long value) {
+      AtomicLong counter;
+      synchronized (callCounters) { // Expected to always be the uncontended case
+        counter = callCounters.get(name);
+        if (counter == null) {
+          counter = new AtomicLong();
+          callCounters.put(name, counter);
+        }
+      }
+      counter.set(value);
+    }
+
+    @Override
+    public long updateCallMetric(String name, long delta) {
+      AtomicLong counter;
+      synchronized (callCounters) { // Expected to always be the uncontended case
+        counter = callCounters.get(name);
+        if (counter == null) {
+          counter = new AtomicLong();
+          callCounters.put(name, counter);
+        }
+      }
+      return counter.addAndGet(delta);
+    }
+
+    @Override
+    public Map<String, Long> getCallMetrics() {
+      Map<String, Long> map = new TreeMap<>(); // TreeMap so enumeration will be sorted
+      synchronized (callCounters) { // Expected to always be the uncontended case
+        for (Map.Entry<String, AtomicLong> e: callCounters.entrySet()) {
+          map.put(e.getKey(), e.getValue().longValue());
+        }
+      }
+      return map;
+    }
   }
 
   /** Listens on the socket. Creates jobs for the handler threads*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
new file mode 100644
index 0000000000..3a1021a0dc
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ipc;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ipc.RpcServer.Call;
+
+public class ServerCall {
+
+  static final Log LOG = LogFactory.getLog(ServerCall.class);
+
+  public static boolean isTracing() {
+    return LOG.isTraceEnabled();
+  }
+
+  public static Map<String, Long> getCurrentCallMetrics() {
+    RpcCallContext call = RpcServer.getCurrentCall();
+    if (call != null) {
+      return call.getCallMetrics();
+    }
+    return new HashMap<>();
+  }
+
+  public static void setCurrentCallMetric(String name, long value) {
+    RpcCallContext call = RpcServer.getCurrentCall();
+    if (call != null) {
+      call.setCallMetric(name, value);
+    }
+  }
+
+  public static long updateCurrentCallMetric(String name, long delta) {
+    RpcCallContext call = RpcServer.getCurrentCall();
+    if (call != null) {
+      return call.updateCallMetric(name, delta);
+    }
+    return 0;
+  }
+
+  public static void logCallTrace(Call call, boolean successful) {
+    StringBuffer sb = new StringBuffer();
+    sb.append(call.toShortString());
+    sb.append(" successful: "); sb.append(successful);
+    Map<String, Long> metrics = call.getCallMetrics();
+    if (!metrics.isEmpty()) {
+      sb.append(" metrics: [");
+      for (Map.Entry<String, Long> e: metrics.entrySet()) {
+        sb.append(" \""); sb.append(e.getKey()); sb.append("\": ");
+        sb.append(e.getValue());
+      }
+      sb.append(" ]");
+    }
+    ServerCall.LOG.trace(sb.toString());
+  }
+
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 32bfbd5cd2..1f0353c6c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -809,6 +810,13 @@ public class DefaultMemStore implements MemStore { */ @Override public synchronized boolean seek(Cell key) { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + if (key == null) { close(); return false; @@ -821,6 +829,14 @@ public class DefaultMemStore implements MemStore { snapshotItRow = null; return seekInSubLists(key); + + } finally { + if (start > 0) { // only nonzero if we were tracing at entry + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("memstore_seek_ns", end - start); + ServerCall.updateCurrentCallMetric("memstore_seek", 1); + } + } } @@ -846,6 +862,13 @@ public
class DefaultMemStore implements MemStore { */ @Override public synchronized boolean reseek(Cell key) { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + /* See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation. This code is executed concurrently with flush and puts, without locks. @@ -862,8 +885,15 @@ public class DefaultMemStore implements MemStore { snapshotIt = snapshotAtCreation.getCellSkipListSet().tailSet(getHighest(key, snapshotItRow)).iterator(); return seekInSubLists(key); - } + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("memstore_reseek_ns", end - start); + ServerCall.updateCurrentCallMetric("memstore_reseek", 1); + } + } + } @Override public synchronized Cell peek() { @@ -873,6 +903,13 @@ public class DefaultMemStore implements MemStore { @Override public synchronized Cell next() { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + if (theNext == null) { return null; } @@ -893,6 +930,14 @@ public class DefaultMemStore implements MemStore { //DebugPrint.println(" MS@" + hashCode() + " next: " + theNext + " next_next: " + // getLowest() + " threadpoint=" + readpoint); return ret; + + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("memstore_next_ns", end - start); + ServerCall.updateCurrentCallMetric("memstore_next", 1); + } + } } /* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java index 01a102015e..30e112ee46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; +import org.apache.hadoop.hbase.ipc.ServerCall; /** * ScannerContext instances encapsulate limit tracking AND progress towards those limits during @@ -215,8 +216,10 @@ public class ScannerContext { if (!NextState.isValidState(state)) { throw new IllegalArgumentException("Cannot set to invalid state: " + state); } - this.scannerState = state; + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("scanner_state_" + state.name().toLowerCase(), 1); + } return state; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 7343eafe0b..decaabb3e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; @@ -173,8 +174,14 @@ public class StoreFileScanner implements KeyValueScanner { @Override 
public Cell next() throws IOException { - Cell retKey = cur; + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + try { + + Cell retKey = cur; try { // only seek if we aren't at the end. cur == null implies 'end'. if (cur != null) { @@ -190,11 +197,30 @@ public class StoreFileScanner implements KeyValueScanner { throw new IOException("Could not iterate " + this, e); } return retKey; + + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("store_next_ns", end - start); + ServerCall.updateCurrentCallMetric("store_next_ns_" + hfs.getReader().getName(), end - start); + ServerCall.updateCurrentCallMetric("store_next", 1); + ServerCall.updateCurrentCallMetric("store_next_" + hfs.getReader().getName(), 1); + } + } } @Override public boolean seek(Cell key) throws IOException { - if (seekCount != null) seekCount.incrementAndGet(); + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + + if (seekCount != null) { + seekCount.incrementAndGet(); + } try { try { @@ -218,10 +244,27 @@ public class StoreFileScanner implements KeyValueScanner { } catch (IOException ioe) { throw new IOException("Could not seek " + this + " to key " + key, ioe); } + + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("store_seek_ns", end - start); + ServerCall.updateCurrentCallMetric("store_seek_ns_" + hfs.getReader().getName(), end - start); + ServerCall.updateCurrentCallMetric("store_seek", 1); + ServerCall.updateCurrentCallMetric("store_seek_" + hfs.getReader().getName(), 1); + } + } } @Override public boolean reseek(Cell key) throws IOException { + long start = 0, end; + if (ServerCall.isTracing()) { + start = System.nanoTime(); + } + + try { + if (seekCount != null) seekCount.incrementAndGet(); try { @@ -246,6 +289,16 @@ public class StoreFileScanner implements KeyValueScanner { throw new IOException("Could not reseek " + this + " to key " + key, ioe); } + + } finally { + if (start > 0) { + end = System.nanoTime(); + ServerCall.updateCurrentCallMetric("store_reseek_ns", end - start); + ServerCall.updateCurrentCallMetric("store_reseek_ns_" + hfs.getReader().getName(), end - start); + ServerCall.updateCurrentCallMetric("store_reseek", 1); + ServerCall.updateCurrentCallMetric("store_reseek_" + hfs.getReader().getName(), 1); + } + } } protected void setCurrentCell(Cell newVal) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index c615925417..1737554c7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler; @@ -496,6 +497,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner boolean flushed = checkFlushed(); // reset matcher state, in case that underlying store changed checkReseek(flushed); + 
if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("seeks", 1); + } return this.heap.seek(key); } @@ -563,12 +567,20 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } } - if (prevCell != cell) ++kvsScanned; // Do object compare - we set prevKV from the same heap. + if (prevCell != cell) { // Do object compare - we set prevKV from the same heap. + ++kvsScanned; + } checkScanOrder(prevCell, cell, comparator); prevCell = cell; scannerContext.setLastPeekedCell(cell); topChanged = false; ScanQueryMatcher.MatchCode qcode = matcher.match(cell); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("sqm_hint_" + qcode.toString().toLowerCase(), 1); + ServerCall.updateCurrentCallMetric("cells_matched", 1); + ServerCall.updateCurrentCallMetric("cells_matched__" + this.store.getRegionInfo().getRegionNameAsString() + "__" + + this.store.getColumnFamilyName(), 1); + } switch (qcode) { case INCLUDE: case INCLUDE_AND_SEEK_NEXT_ROW: @@ -1038,6 +1050,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // If called from RegionScanner.reseek(...) make sure the scanner // stack is reset if needed. checkReseek(flushed); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("reseeks", 1); + } if (explicitColumnQuery && lazySeekEnabledGlobally) { return heap.requestSeek(kv, true, useRowColBloom); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java index b253a524a4..71c03eb172 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.regionserver.DeleteTracker; import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -217,6 +218,9 @@ public class LegacyScanQueryMatcher extends ScanQueryMatcher { // delete marker are not subject to other delete markers } else if (!this.deletes.isEmpty()) { DeleteResult deleteResult = deletes.isDeleted(cell); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("delete_hint_" + deleteResult.toString().toLowerCase(), 1); + } switch (deleteResult) { case FAMILY_DELETED: case COLUMN_DELETED: @@ -243,12 +247,18 @@ public class LegacyScanQueryMatcher extends ScanQueryMatcher { // STEP 1: Check if the column is part of the requested columns MatchCode colChecker = columns.checkColumn(cell.getQualifierArray(), qualifierOffset, qualifierLength, typeByte); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("column_hint_" + colChecker.toString().toLowerCase(), 1); + } if (colChecker == MatchCode.INCLUDE) { ReturnCode filterResponse = ReturnCode.SKIP; // STEP 2: Yes, the column is part of the requested columns. 
Check if filter is present if (filter != null) { // STEP 3: Filter the key value and return if it filters out filterResponse = filter.filterKeyValue(cell); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("filter_hint_" + filterResponse.toString().toLowerCase(), 1); + } switch (filterResponse) { case SKIP: return MatchCode.SKIP; @@ -287,6 +297,9 @@ public class LegacyScanQueryMatcher extends ScanQueryMatcher { columns.checkVersions(cell.getQualifierArray(), qualifierOffset, qualifierLength, timestamp, typeByte, mvccVersion > maxReadPointToTrackVersions); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("versions_hint_" + colChecker.toString().toLowerCase(), 1); + } return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL && colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL : colChecker; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java index 64c48b14ac..bbc70145d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.regionserver.DeleteTracker; import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult; import org.apache.hadoop.hbase.regionserver.HStore; @@ -171,6 +172,9 @@ public abstract class ScanQueryMatcher { return null; } DeleteResult deleteResult = deletes.isDeleted(cell); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("delete_hint_" + deleteResult.toString().toLowerCase(), 1); + } switch (deleteResult) { case FAMILY_DELETED: case COLUMN_DELETED: diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java index 50d6612457..16164fdbd9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.ipc.ServerCall; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanInfo; @@ -112,6 +113,9 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { // STEP 1: Check if the column is part of the requested columns MatchCode colChecker = columns.checkColumn(cell.getQualifierArray(), qualifierOffset, qualifierLength, typeByte); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("column_hint_" + colChecker.toString().toLowerCase(), 1); + } if (colChecker != MatchCode.INCLUDE) { return colChecker; } @@ -120,6 +124,9 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { if (filter != null) { // STEP 3: Filter the key value 
and return if it filters out filterResponse = filter.filterKeyValue(cell); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("filter_hint_" + filterResponse.toString().toLowerCase(), 1); + } switch (filterResponse) { case SKIP: return MatchCode.SKIP; @@ -156,6 +163,9 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher { */ colChecker = columns.checkVersions(cell.getQualifierArray(), qualifierOffset, qualifierLength, timestamp, typeByte, false); + if (ServerCall.isTracing()) { + ServerCall.updateCurrentCallMetric("versions_hint_" + colChecker.toString().toLowerCase(), 1); + } return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL && colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL : colChecker; } -- 2.26.2
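The instrumentation added throughout this patch follows one idiom: sample System.nanoTime() only when ServerCall.isTracing() reports that TRACE logging is enabled, run the original work inside a try block, and in the finally block accumulate an elapsed-time counter ("*_ns") plus an event counter against the current RPC via ServerCall.updateCurrentCallMetric(); CallRunner then emits the sorted counters through logCallTrace() when the call finishes. The standalone Java sketch below condenses that idiom outside HBase; the CallMetricsSketch class, its ThreadLocal map, and expensiveWork() are illustrative stand-ins and not part of the patch.

import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;

public class CallMetricsSketch {

  // Stand-in for the per-call state the patch keeps on RpcServer.Call; a ThreadLocal
  // map plays the role of RpcServer.getCurrentCall() resolving the handler's call.
  private static final ThreadLocal<Map<String, AtomicLong>> CURRENT =
      ThreadLocal.withInitial(TreeMap::new);

  // Stand-in for ServerCall.isTracing(); the patch keys this off TRACE being enabled
  // for the org.apache.hadoop.hbase.ipc.ServerCall logger.
  static boolean isTracing() {
    return true;
  }

  // Stand-in for ServerCall.updateCurrentCallMetric(name, delta): create the counter
  // on first use, then accumulate into it.
  static long updateCurrentCallMetric(String name, long delta) {
    return CURRENT.get().computeIfAbsent(name, k -> new AtomicLong()).addAndGet(delta);
  }

  // The idiom the patch wraps around block reads, seeks and next() calls: sample
  // nanoTime only when tracing, do the work, then record elapsed time and a count.
  static int instrumentedWork() {
    long start = 0;
    if (isTracing()) {
      start = System.nanoTime();
    }
    try {
      return expensiveWork();
    } finally {
      if (start > 0) { // only nonzero if tracing was on at entry
        long end = System.nanoTime();
        updateCurrentCallMetric("work_ns", end - start);
        updateCurrentCallMetric("work", 1);
      }
    }
  }

  // Placeholder for the instrumented operation (e.g. a block read or a memstore seek).
  static int expensiveWork() {
    int acc = 0;
    for (int i = 0; i < 1_000_000; i++) {
      acc += i;
    }
    return acc;
  }

  public static void main(String[] args) {
    instrumentedWork();
    // Sorted counter dump for this thread, analogous to the line logCallTrace()
    // emits at TRACE level when the RPC completes.
    System.out.println(CURRENT.get());
  }
}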