From 5b85716dc29c10636d02a0b41cc0d1479ce0c786 Mon Sep 17 00:00:00 2001
From: Tamas Penzes
Date: Mon, 18 Sep 2017 21:49:31 +0200
Subject: [PATCH] HBASE-18601: Update Htrace to 4.2

Create TraceUtil class to wrap the htrace 4 methods. Use try-with-resources
to close trace scopes.
---
 hbase-backup/pom.xml | 10 ++
 hbase-client/pom.xml | 12 +-
 .../hbase/client/AsyncRequestFutureImpl.java | 9 +-
 .../client/ResultBoundedCompletionService.java | 4 +-
 .../hadoop/hbase/ipc/BlockingRpcConnection.java | 7 +-
 .../java/org/apache/hadoop/hbase/ipc/Call.java | 6 +-
 .../java/org/apache/hadoop/hbase/ipc/IPCUtil.java | 7 +-
 .../hbase/zookeeper/RecoverableZooKeeper.java | 86 +++--------
 hbase-common/pom.xml | 14 +-
 .../hbase/trace/HBaseHTraceConfiguration.java | 2 +-
 .../hadoop/hbase/trace/SpanReceiverHost.java | 10 +-
 .../org/apache/hadoop/hbase/trace/TraceUtil.java | 112 ++++++++++++++
 hbase-endpoint/pom.xml | 24 +++
 hbase-examples/pom.xml | 18 +++
 hbase-external-blockcache/pom.xml | 10 +-
 .../hadoop/hbase/io/hfile/MemcachedBlockCache.java | 8 +-
 hbase-hadoop2-compat/pom.xml | 6 +
 hbase-it/pom.xml | 20 ++-
 .../hadoop/hbase/mttr/IntegrationTestMTTR.java | 15 +-
 .../trace/IntegrationTestSendTraceRequests.java | 54 +++----
 hbase-mapreduce/pom.xml | 32 +++-
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java | 2 +-
 .../apache/hadoop/hbase/PerformanceEvaluation.java | 18 +--
 hbase-prefix-tree/pom.xml | 8 +
 hbase-procedure/pom.xml | 6 +
 hbase-protocol-shaded/pom.xml | 4 +
 hbase-replication/pom.xml | 10 ++
 hbase-rest/pom.xml | 12 ++
 hbase-rsgroup/pom.xml | 10 ++
 hbase-server/pom.xml | 30 +++-
 .../apache/hadoop/hbase/executor/EventHandler.java | 14 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java | 15 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java | 11 +-
 .../apache/hadoop/hbase/ipc/NettyRpcServer.java | 2 +-
 .../apache/hadoop/hbase/ipc/NettyServerCall.java | 5 +-
 .../hadoop/hbase/ipc/NettyServerRpcConnection.java | 5 +-
 .../java/org/apache/hadoop/hbase/ipc/RpcCall.java | 6 -
 .../org/apache/hadoop/hbase/ipc/ServerCall.java | 10 +-
 .../hadoop/hbase/ipc/ServerRpcConnection.java | 12 +-
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java | 2 +-
 .../apache/hadoop/hbase/ipc/SimpleServerCall.java | 5 +-
 .../hbase/ipc/SimpleServerRpcConnection.java | 7 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java | 25 +---
 .../hadoop/hbase/regionserver/MemStoreFlusher.java | 11 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java | 25 ++--
 .../hadoop/hbase/regionserver/wal/AsyncFSWAL.java | 48 +++---
 .../hadoop/hbase/regionserver/wal/FSHLog.java | 48 +++---
 .../hadoop/hbase/regionserver/wal/FSWALEntry.java | 2 +-
 .../hbase/regionserver/wal/RingBufferTruck.java | 2 +-
 .../hadoop/hbase/regionserver/wal/SyncFuture.java | 2 +-
 .../hadoop/hbase/ipc/TestSimpleRpcScheduler.java | 2 +-
 .../apache/hadoop/hbase/trace/TestHTraceHooks.java | 165 +++++++++++++++++----
 .../hadoop/hbase/wal/WALPerformanceEvaluation.java | 32 ++--
 hbase-shell/pom.xml | 28 ++++
 hbase-shell/src/main/ruby/shell/commands/trace.rb | 4 +-
 hbase-spark/pom.xml | 12 ++
 hbase-testing-util/pom.xml | 30 ++++
 hbase-thrift/pom.xml | 22 +++
 pom.xml | 60 ++++++--
 src/main/asciidoc/_chapters/tracing.adoc | 4 +-
 60 files changed, 804 insertions(+), 378 deletions(-)
 create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java

diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml
index 147f17f1d5..6282471297 100644
--- a/hbase-backup/pom.xml
+++ b/hbase-backup/pom.xml
@@ -175,6 +175,10 @@ hadoop-common + org.apache.htrace + htrace-core + + net.java.dev.jets3t jets3t
@@ -287,6 +291,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + +
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index d9aa006fef..a7be74303c 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -166,7 +166,7 @@ org.apache.htrace - htrace-core + htrace-core4 org.codehaus.jackson
@@ -259,6 +259,10 @@ hadoop-common + org.apache.htrace + htrace-core + + net.java.dev.jets3t jets3t
@@ -326,6 +330,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + +
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index 4df176809f..f511d7ea6e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.RetryImmediatelyException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.backoff.ServerStatistics; import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -56,7 +58,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.htrace.Trace;
@@ -582,7 +584,8 @@ asyncProcess.incTaskCounters(multiAction.getRegions(), server); SingleServerRequestRunnable runnable = createSingleServerRequest( multiAction, numAttempt, server, callsInProgress); - return Collections.singletonList(Trace.wrap("AsyncProcess.sendMultiAction", runnable)); + // TraceUtil.wrap falls back to the bare runnable when no tracer is attached, + // so the result is always a non-null single-element list. + return Collections.singletonList(TraceUtil.wrap(runnable, "AsyncProcess.sendMultiAction")); } // group the actions by the amount of delay
@@ -618,7 +621,7 @@ asyncProcess.connection.getConnectionMetrics().incrNormalRunners(); } } - runnable = Trace.wrap(traceText, runnable); + runnable = TraceUtil.wrap(runnable, traceText); toReturn.add(runnable); }
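(Illustration, not part of the patch: how the wrap() helper introduced in hbase-common below is meant to be used when handing work to an executor. Class and task names here are hypothetical; a minimal sketch assuming the TraceUtil class added by this patch is on the classpath.)

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.hbase.trace.TraceUtil;

class WrapExample {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(1);
    // The wrapped Runnable re-enters the submitting thread's span when it
    // runs; when no tracer is attached it is simply the original task.
    pool.execute(TraceUtil.wrap(
        () -> System.out.println("traced work"), "example.task"));
    pool.shutdown();
  }
}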
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
index ccfe6baceb..b05ad64146 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
@@ -28,9 +28,9 @@ import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.htrace.Trace; /** * A completion service for the RpcRetryingCallerFactory.
@@ -168,7 +168,7 @@ public class ResultBoundedCompletionService<V> { public void submit(RetryingCallable<V> task, int callTimeout, int id) { QueueingFuture<V> newFuture = new QueueingFuture<>(task, callTimeout, id); - executor.execute(Trace.wrap(newFuture)); + executor.execute(TraceUtil.wrap(newFuture, "ResultBoundedCompletionService.submit")); tasks[id] = newFuture; }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
index 052433684a..e19931409c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
@@ -55,6 +55,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@@ -75,8 +76,8 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; /** * Thread that reads responses and notifies callers. Each connection owns a socket connected to a
@@ -574,7 +575,7 @@ } private void tracedWriteRequest(Call call) throws IOException { - try (TraceScope ignored = Trace.startSpan("RpcClientImpl.tracedWriteRequest", call.span)) { + // call.span may be null when the caller is untraced; guard before dereferencing it. + try (TraceScope ignored = call.span == null ? TraceUtil.createTrace("RpcClientImpl.tracedWriteRequest") : TraceUtil.createTrace("RpcClientImpl.tracedWriteRequest", call.span.getSpanId())) { writeRequest(call); } }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
index 5c0689ada8..72f03f9616 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.Tracer; /** A call waiting for a value.
*/ @InterfaceAudience.Private @@ -73,7 +73,7 @@ class Call { this.timeout = timeout; this.priority = priority; this.callback = callback; - this.span = Trace.currentSpan(); + this.span = Tracer.getCurrentSpan(); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index 7c0ddf02f9..b02e6867ea 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -102,10 +102,11 @@ class IPCUtil { static RequestHeader buildRequestHeader(Call call, CellBlockMeta cellBlockMeta) { RequestHeader.Builder builder = RequestHeader.newBuilder(); builder.setCallId(call.id); - if (call.span != null) { + //TODO handle htrace API change + /*if (call.span != null) { builder.setTraceInfo(RPCTInfo.newBuilder().setParentId(call.span.getSpanId()) - .setTraceId(call.span.getTraceId())); - } + .setTraceId(call.span.getTracerId())); + }*/ builder.setMethodName(call.md.getName()); builder.setRequestParam(call.param != null); if (cellBlockMeta != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 1c3138bfa6..7074fb25af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -27,11 +27,14 @@ import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.RetryCounterFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; @@ -45,8 +48,6 @@ import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.proto.CreateRequest; import org.apache.zookeeper.proto.SetDataRequest; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; /** * A zookeeper that can handle 'recoverable' errors. @@ -169,11 +170,8 @@ public class RecoverableZooKeeper { * This function will not throw NoNodeException if the path does not * exist. */ - public void delete(String path, int version) - throws InterruptedException, KeeperException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.delete"); + public void delete(String path, int version) throws InterruptedException, KeeperException { + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.delete")) { RetryCounter retryCounter = retryCounterFactory.create(); boolean isRetry = false; // False for first attempt, true for all retries. while (true) { @@ -210,8 +208,6 @@ public class RecoverableZooKeeper { retryCounter.sleepUntilNextRetry(); isRetry = true; } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -219,11 +215,8 @@ public class RecoverableZooKeeper { * exists is an idempotent operation. 
Retry before throwing exception * @return A Stat instance */ - public Stat exists(String path, Watcher watcher) - throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.exists"); + public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException { + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -249,8 +242,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -258,11 +249,8 @@ public class RecoverableZooKeeper { * exists is an idempotent operation. Retry before throwing exception * @return A Stat instance */ - public Stat exists(String path, boolean watch) - throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.exists"); + public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException { + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -288,8 +276,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -309,9 +295,7 @@ public class RecoverableZooKeeper { */ public List getChildren(String path, Watcher watcher) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.getChildren"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -337,8 +321,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -348,9 +330,7 @@ public class RecoverableZooKeeper { */ public List getChildren(String path, boolean watch) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.getChildren"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -376,8 +356,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -387,9 +365,7 @@ public class RecoverableZooKeeper { */ public byte[] getData(String path, Watcher watcher, Stat stat) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.getData"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -415,8 +391,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -426,9 +400,7 @@ public class RecoverableZooKeeper { */ public byte[] getData(String path, boolean watch, Stat stat) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.getData"); + try (TraceScope scope = 
TraceUtil.createTrace("RecoverableZookeeper.getData")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -454,8 +426,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -467,9 +437,7 @@ public class RecoverableZooKeeper { */ public Stat setData(String path, byte[] data, int version) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.setData"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setData")) { RetryCounter retryCounter = retryCounterFactory.create(); byte[] newData = appendMetaData(data); boolean isRetry = false; @@ -517,8 +485,6 @@ public class RecoverableZooKeeper { retryCounter.sleepUntilNextRetry(); isRetry = true; } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -528,9 +494,7 @@ public class RecoverableZooKeeper { */ public List getAcl(String path, Stat stat) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.getAcl"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getAcl")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -556,8 +520,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -567,9 +529,7 @@ public class RecoverableZooKeeper { */ public Stat setAcl(String path, List acls, int version) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.setAcl"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setAcl")) { RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -595,8 +555,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -618,9 +576,7 @@ public class RecoverableZooKeeper { public String create(String path, byte[] data, List acl, CreateMode createMode) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.create"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.create")) { byte[] newData = appendMetaData(data); switch (createMode) { case EPHEMERAL: @@ -635,8 +591,6 @@ public class RecoverableZooKeeper { throw new IllegalArgumentException("Unrecognized CreateMode: " + createMode); } - } finally { - if (traceScope != null) traceScope.close(); } } @@ -765,9 +719,7 @@ public class RecoverableZooKeeper { */ public List multi(Iterable ops) throws KeeperException, InterruptedException { - TraceScope traceScope = null; - try { - traceScope = Trace.startSpan("RecoverableZookeeper.multi"); + try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.multi")) { RetryCounter retryCounter = retryCounterFactory.create(); Iterable multiOps = prepareZKMulti(ops); while (true) { @@ -794,8 +746,6 @@ public class RecoverableZooKeeper { } retryCounter.sleepUntilNextRetry(); } - } finally { - if (traceScope != null) traceScope.close(); } } diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 9bf40324a3..595f57fa82 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -244,7 +244,7 @@ org.apache.htrace - htrace-core + 
htrace-core4 org.apache.commons @@ -340,6 +340,12 @@ hadoop-common + + + org.apache.htrace + htrace-core + + @@ -386,6 +392,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java index 55e53e376a..07798823f3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.trace; import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.htrace.HTraceConfiguration; +import org.apache.htrace.core.HTraceConfiguration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java index cb65f0970e..de16015251 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java @@ -25,9 +25,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.htrace.SpanReceiver; -import org.apache.htrace.SpanReceiverBuilder; -import org.apache.htrace.Trace; +import org.apache.htrace.core.SpanReceiver; /** * This class provides functions for reading the names of SpanReceivers from @@ -78,18 +76,18 @@ public class SpanReceiverHost { return; } - SpanReceiverBuilder builder = new SpanReceiverBuilder(new HBaseHTraceConfiguration(conf)); + SpanReceiver.Builder builder = new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf)); for (String className : receiverNames) { className = className.trim(); - SpanReceiver receiver = builder.spanReceiverClass(className).build(); + SpanReceiver receiver = builder.className(className).build(); if (receiver != null) { receivers.add(receiver); LOG.info("SpanReceiver " + className + " was loaded successfully."); } } for (SpanReceiver rcvr : receivers) { - Trace.addReceiver(rcvr); + TraceUtil.addReceiver(rcvr); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java new file mode 100644 index 0000000000..f8c3f8024c --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.SpanReceiver;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+
+/**
+ * This wrapper class provides functions for accessing htrace 4+ functionality in a simplified way.
+ */
+public class TraceUtil {
+  /**
+   * Wrapper method to create a new TraceScope with the given description.
+   * @param description description of the scope to create
+   * @return the new TraceScope, or null if no tracer is attached to the current thread
+   */
+  public static TraceScope createTrace(String description) {
+    Tracer tracer = Tracer.curThreadTracer();
+    return (tracer == null) ? null : tracer.newScope(description);
+  }
+
+  /**
+   * Wrapper method to create a new child TraceScope with the given description
+   * and the parent span's id.
+   * @param description description of the scope to create
+   * @param spanId the parent's span id
+   * @return the new TraceScope, or null if no tracer is attached to the current thread
+   */
+  public static TraceScope createTrace(String description, SpanId spanId) {
+    Tracer tracer = Tracer.curThreadTracer();
+    return (tracer == null) ? null : tracer.newScope(description, spanId);
+  }
+
+  /**
+   * Wrapper method to add a new sampler to the current thread's tracer.
+   * @param sampler the sampler to add
+   * @return true if added, false if it was already present or no tracer is attached
+   */
+  public static boolean addSampler(Sampler sampler) {
+    Tracer tracer = Tracer.curThreadTracer();
+    return (tracer == null) ? false : tracer.addSampler(sampler);
+  }
+
+  /**
+   * Wrapper method to add a key-value annotation to the current span, if any.
+   * @param key annotation key
+   * @param value annotation value
+   */
+  public static void addKVAnnotation(String key, String value) {
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
+      span.addKVAnnotation(key, value);
+    }
+  }
+
+  /**
+   * Wrapper method to add a receiver to the current tracer's pool.
+   * @param rcvr the receiver to add
+   * @return true if successful, false if it was already added or no tracer is attached
+   */
+  public static boolean addReceiver(SpanReceiver rcvr) {
+    Tracer tracer = Tracer.curThreadTracer();
+    return (tracer == null) ? false : tracer.getTracerPool().addReceiver(rcvr);
+  }
+
+  /**
+   * Wrapper method to remove a receiver from the current tracer's pool.
+   * @param rcvr the receiver to remove
+   * @return true if removed, false if it does not exist or no tracer is attached
+   */
+  public static boolean removeReceiver(SpanReceiver rcvr) {
+    Tracer tracer = Tracer.curThreadTracer();
+    return (tracer == null) ? false : tracer.getTracerPool().removeReceiver(rcvr);
+  }
+
+  /**
+   * Wrapper method to add a timeline annotation to the current span, if any.
+   * @param msg annotation message
+   */
+  public static void addTimelineAnnotation(String msg) {
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
+      span.addTimelineAnnotation(msg);
+    }
+  }
+
+  /**
+   * Wrapper method to attach the current thread's span to the given Runnable.
+   * @param runnable the task to wrap
+   * @param description description of the span created around the task
+   * @return the wrapped Runnable, or the original task if no tracer is attached
+   */
+  public static Runnable wrap(Runnable runnable, String description) {
+    Tracer tracer = Tracer.curThreadTracer();
+    return (tracer == null) ? runnable : tracer.wrap(runnable, description);
+  }
+}
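(Illustration, not part of the patch: the try-with-resources pattern the commit message refers to. createTrace() returns null when no tracer is attached to the current thread, and Java skips close() for a null resource, so callers need no null check as long as they do not dereference the scope. Class and method names below are hypothetical.)

import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.TraceScope;

class CreateTraceExample {
  void doWork() {
    // The scope is closed automatically at the end of the block; a null
    // scope (tracing disabled) is simply skipped by try-with-resources.
    try (TraceScope scope = TraceUtil.createTrace("Example.doWork")) {
      TraceUtil.addTimelineAnnotation("work started"); // no-op without a span
      // ... application logic ...
    }
  }
}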
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index fdb4784fcc..3e29fa9b25 100644
--- a/hbase-endpoint/pom.xml
+++ b/hbase-endpoint/pom.xml
@@ -242,6 +242,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop
@@ -274,6 +280,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava
@@ -285,6 +295,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava
@@ -318,12 +332,22 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop hadoop-minicluster + org.apache.htrace + htrace-core + + com.google.guava guava
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index c72a874f9f..9f740a9334 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -233,6 +233,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + +
@@ -278,10 +284,22 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop hadoop-minicluster + + + org.apache.htrace + htrace-core + +
diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml
index 53708d83a0..845e8f33e1 100644
--- a/hbase-external-blockcache/pom.xml
+++ b/hbase-external-blockcache/pom.xml
@@ -173,7 +173,7 @@ org.apache.htrace - htrace-core + htrace-core4 junit
@@ -245,6 +245,10 @@ hadoop-common + org.apache.htrace + htrace-core + + com.google.guava guava
@@ -297,6 +301,10 @@ hadoop-common + org.apache.htrace + htrace-core + + com.google.guava guava
diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index d7593676b3..845446e482 100644
--- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -1,3 +1,4 @@ + /** * Copyright The Apache Software Foundation *
@@ -28,13 +29,14 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; import org.apache.hadoop.hbase.util.Addressing; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; import java.io.IOException;
@@ -134,7 +136,7 @@ public class MemcachedBlockCache implements BlockCache { // Assume that nothing is the block cache HFileBlock result = null; - try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) { + try (TraceScope traceScope = TraceUtil.createTrace("MemcachedBlockCache.getBlock")) { result = client.get(cacheKey.toString(), tc); } catch (Exception e) { // Catch a pretty broad set of exceptions to limit any changes in the memcached client
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index c314aca163..1a13979c49 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -170,6 +170,12 @@ limitations under the License.
org.apache.hadoop hadoop-common ${hadoop-two.version} + + + org.apache.htrace + htrace-core + + org.apache.commons diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 6df0b599fc..66c6ad3c0d 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -266,7 +266,7 @@ org.apache.htrace - htrace-core + htrace-core4 javax.ws.rs @@ -352,6 +352,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop @@ -402,10 +408,22 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop hadoop-minicluster + + + org.apache.htrace + htrace-core + + org.apache.hadoop diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index 71e0d0b3f9..e98aaf064f 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -62,13 +62,13 @@ import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.ipc.FatalConnectionException; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.security.AccessDeniedException; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LoadTestTool; import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; -import org.apache.htrace.impl.AlwaysSampler; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.AlwaysSampler; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -369,7 +369,7 @@ public class IntegrationTestMTTR { */ private static class TimingResult { DescriptiveStatistics stats = new DescriptiveStatistics(); - ArrayList traces = new ArrayList<>(10); + ArrayList traces = new ArrayList<>(10); /** * Add a result to this aggregate result. 
@@ -379,7 +379,7 @@ public class IntegrationTestMTTR { public void addResult(long time, Span span) { stats.addValue(TimeUnit.MILLISECONDS.convert(time, TimeUnit.NANOSECONDS)); if (TimeUnit.SECONDS.convert(time, TimeUnit.NANOSECONDS) >= 1) { - traces.add(span.getTraceId()); + traces.add(span.getTracerId()); } } @@ -424,7 +424,8 @@ public class IntegrationTestMTTR { long start = System.nanoTime(); TraceScope scope = null; try { - scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE); + scope = TraceUtil.createTrace(getSpanName()); + TraceUtil.addSampler(AlwaysSampler.INSTANCE); boolean actionResult = doAction(); if (actionResult && future.isDone()) { numAfterDone++; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java index 327d8792d6..d52a3538c1 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java @@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.ToolRunner; -import org.apache.htrace.Sampler; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.Sampler; +import org.apache.htrace.core.Tracer; +import org.apache.htrace.core.TraceScope; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -117,13 +117,12 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { for (int i = 0; i < 100; i++) { Runnable runnable = new Runnable() { - private TraceScope innerScope = null; private final LinkedBlockingQueue rowKeyQueue = rks; @Override public void run() { ResultScanner rs = null; - try { - innerScope = Trace.startSpan("Scan", Sampler.ALWAYS); + try (TraceScope scope = TraceUtil.createTrace("Scan")){ + TraceUtil.addSampler(Sampler.ALWAYS); Table ht = util.getConnection().getTable(tableName); Scan s = new Scan(); s.setStartRow(Bytes.toBytes(rowKeyQueue.take())); @@ -137,20 +136,15 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { accum |= Bytes.toLong(r.getRow()); } - innerScope.getSpan().addTimelineAnnotation("Accum result = " + accum); + TraceUtil.addTimelineAnnotation("Accum result = " + accum); ht.close(); ht = null; } catch (IOException e) { e.printStackTrace(); - - innerScope.getSpan().addKVAnnotation( - Bytes.toBytes("exception"), - Bytes.toBytes(e.getClass().getSimpleName())); - + TraceUtil.addKVAnnotation("exception", e.getClass().getSimpleName()); } catch (Exception e) { } finally { - if (innerScope != null) innerScope.close(); if (rs != null) rs.close(); } @@ -165,7 +159,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { throws IOException { for (int i = 0; i < 100; i++) { Runnable runnable = new Runnable() { - private TraceScope innerScope = null; private final LinkedBlockingQueue rowKeyQueue = rowKeys; @Override @@ -181,8 +174,8 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { long accum = 0; for (int x = 0; x < 5; x++) { - try { - innerScope = Trace.startSpan("gets", Sampler.ALWAYS); + try (TraceScope scope = TraceUtil.createTrace("gets")) { + TraceUtil.addSampler(Sampler.ALWAYS); long rk = rowKeyQueue.take(); Result r1 = ht.get(new Get(Bytes.toBytes(rk))); if (r1 != null) { @@ 
-192,14 +185,10 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { if (r2 != null) { accum |= Bytes.toLong(r2.getRow()); } - innerScope.getSpan().addTimelineAnnotation("Accum = " + accum); + TraceUtil.addTimelineAnnotation("Accum = " + accum); - } catch (IOException e) { + } catch (IOException|InterruptedException ie) { // IGNORED - } catch (InterruptedException ie) { - // IGNORED - } finally { - if (innerScope != null) innerScope.close(); } } @@ -210,25 +199,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { } private void createTable() throws IOException { - TraceScope createScope = null; - try { - createScope = Trace.startSpan("createTable", Sampler.ALWAYS); + try (TraceScope scope = TraceUtil.createTrace("createTable")) { + TraceUtil.addSampler(Sampler.ALWAYS); util.createTable(tableName, familyName); - } finally { - if (createScope != null) createScope.close(); } } private void deleteTable() throws IOException { - TraceScope deleteScope = null; - - try { + try (TraceScope scope = TraceUtil.createTrace("deleteTable")) { if (admin.tableExists(tableName)) { - deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS); + TraceUtil.addSampler(Sampler.ALWAYS); util.deleteTable(tableName); } - } finally { - if (deleteScope != null) deleteScope.close(); } } @@ -237,8 +219,8 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName); byte[] value = new byte[300]; for (int x = 0; x < 5000; x++) { - TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS); - try { + try (TraceScope traceScope = TraceUtil.createTrace("insertData")) { + TraceUtil.addSampler(Sampler.ALWAYS); for (int i = 0; i < 5; i++) { long rk = random.nextLong(); rowKeys.add(rk); @@ -252,8 +234,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { if ((x % 1000) == 0) { admin.flush(tableName); } - } finally { - traceScope.close(); } } admin.flush(tableName); diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index eaf6aea5d2..274c1e701d 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -185,7 +185,7 @@ org.apache.htrace - htrace-core + htrace-core4 org.apache.hbase @@ -251,6 +251,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava @@ -333,6 +337,10 @@ hadoop-common + org.apache.htrace + htrace-core + + net.java.dev.jets3t jets3t @@ -379,6 +387,10 @@ hadoop-hdfs + org.apache.htrace + htrace-core + + javax.servlet.jsp jsp-api @@ -416,6 +428,12 @@ org.apache.hadoop hadoop-minicluster test + + + org.apache.htrace + htrace-core + + @@ -440,11 +458,23 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop hadoop-minicluster + + + org.apache.htrace + htrace-core + + diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index 4dcd048f91..77a6b93e91 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -797,7 +797,7 @@ public class TableMapReduceUtil { com.google.protobuf.Message.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.class, org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists.class, - org.apache.htrace.Trace.class, + 
org.apache.htrace.core.Tracer.class, com.codahale.metrics.MetricRegistry.class, org.apache.commons.lang3.ArrayUtils.class); }
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 05e984ee56..050c8fc3b1 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append;
@@ -99,10 +101,10 @@ import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.codehaus.jackson.map.ObjectMapper; -import org.apache.htrace.Sampler; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; -import org.apache.htrace.impl.ProbabilitySampler; +import org.apache.htrace.core.Sampler; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.ProbabilitySampler; import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects; import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -1038,7 +1040,7 @@ public class PerformanceEvaluation extends Configured implements Tool { protected final TestOptions opts; private final Status status; - private final Sampler<?> traceSampler; + private final Sampler traceSampler; private final SpanReceiverHost receiverHost; private String testName;
@@ -1188,11 +1190,9 @@ for (int i = startRow; i < lastRow; i++) { if (i % everyN != 0) continue; long startTime = System.nanoTime(); - TraceScope scope = Trace.startSpan("test row", traceSampler); - try { + try (TraceScope scope = TraceUtil.createTrace("test row")) { + TraceUtil.addSampler(traceSampler); testRow(i); - } finally { - scope.close(); } if ( (i - startRow) > opts.measureAfter) { // If multiget is enabled, say set to 10, testRow() returns immediately first 9 times
diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml
index e5317062cc..38c686e750 100644
--- a/hbase-prefix-tree/pom.xml
+++ b/hbase-prefix-tree/pom.xml
@@ -150,6 +150,10 @@ hadoop-common + org.apache.htrace + htrace-core + + com.google.guava guava
@@ -178,6 +182,10 @@ hadoop-common + org.apache.htrace + htrace-core + + com.google.guava guava
diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml
index 764457a216..bb9ce84423 100644
--- a/hbase-procedure/pom.xml
+++ b/hbase-procedure/pom.xml
@@ -164,6 +164,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + +
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index 9bc79c85ac..10cea2fe68 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -230,6 +230,10 @@ junit test + + org.apache.htrace + htrace-core4 +
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index a56a470c81..942fd8cc85 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -166,6 +166,10 @@
hadoop-common + org.apache.htrace + htrace-core + + net.java.dev.jets3t jets3t @@ -229,6 +233,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 0b188095c1..254648e9aa 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -391,6 +391,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop @@ -430,6 +436,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml index 8a26ead8a9..b9f95cbb29 100644 --- a/hbase-rsgroup/pom.xml +++ b/hbase-rsgroup/pom.xml @@ -208,6 +208,10 @@ hadoop-common + org.apache.htrace + htrace-core + + net.java.dev.jets3t jets3t @@ -270,6 +274,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index ad080f18d7..5dddea1ef4 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -578,7 +578,7 @@ org.apache.htrace - htrace-core + htrace-core4 com.lmax @@ -746,6 +746,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop @@ -793,6 +799,10 @@ hadoop-hdfs + org.apache.htrace + htrace-core + + com.google.guava guava @@ -805,6 +815,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava @@ -816,6 +830,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava @@ -877,6 +895,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop @@ -888,6 +912,10 @@ hadoop-minicluster + org.apache.htrace + htrace-core + + com.google.guava guava diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index cfe3d61ee0..5ebf001bbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -23,11 +23,12 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Server; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.Tracer; +import org.apache.htrace.core.TraceScope; /** * Abstract base class for all HBase event handlers. Subclasses should @@ -74,7 +75,7 @@ public abstract class EventHandler implements Runnable, Comparable { * Default base class constructor. 
*/ public EventHandler(Server server, EventType eventType) { - this.parent = Trace.currentSpan(); + this.parent = Tracer.getCurrentSpan(); this.server = server; this.eventType = eventType; seqid = seqids.incrementAndGet();
@@ -99,13 +100,10 @@ @Override public void run() { - TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent); - try { + // parent is null when the handler was created outside any trace; guard before dereferencing it. + String name = this.getClass().getSimpleName(); + try (TraceScope scope = parent == null ? TraceUtil.createTrace(name) : TraceUtil.createTrace(name, parent.getSpanId())) { process(); } catch(Throwable t) { handleException(t); - } finally { - chunk.close(); } }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index bf722de56e..05ed0a438e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
@@ -57,8 +58,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IdLock; import org.apache.hadoop.hbase.util.ObjectIntPair; import org.apache.hadoop.io.WritableUtils; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@@ -1438,8 +1439,7 @@ boolean useLock = false; IdLock.Entry lockEntry = null; - TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock"); - try { + try (TraceScope traceScope = TraceUtil.createTrace("HFileReaderImpl.readBlock")) { while (true) { // Check cache for block. If found return. if (cacheConf.shouldReadBlockFromCache(expectedBlockType)) {
@@ -1454,9 +1454,7 @@ if (LOG.isTraceEnabled()) { LOG.trace("From Cache " + cachedBlock); } - if (Trace.isTracing()) { - traceScope.getSpan().addTimelineAnnotation("blockCacheHit"); - } + TraceUtil.addTimelineAnnotation("blockCacheHit"); assert cachedBlock.isUnpacked() : "Packed block leak."; if (cachedBlock.getBlockType().isData()) { if (updateCacheMetrics) {
@@ -1482,7 +1480,7 @@ // Carry on, please load. } - if (Trace.isTracing()) { - traceScope.getSpan().addTimelineAnnotation("blockCacheMiss"); - } + TraceUtil.addTimelineAnnotation("blockCacheMiss"); // Load block from filesystem.
@@ -1506,7 +1504,6 @@ return unpacked; } } finally { - traceScope.close(); if (lockEntry != null) { offsetLock.releaseLockEntry(lockEntry); }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index a8d220802b..08d4fd23c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -23,6 +23,7 @@ import java.nio.channels.ClosedChannelException; import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
@@ -31,8 +32,6 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; /** * The request processing logic, which is usually executed in thread pools provided by an
@@ -115,20 +114,17 @@ public class CallRunner { String error = null; Pair resultPair = null; RpcServer.CurCall.set(call); - TraceScope traceScope = null; try { if (!this.rpcServer.isStarted()) { InetSocketAddress address = rpcServer.getListenerAddress(); throw new ServerNotRunningYetException("Server " + (address != null ? address : "(channel closed)") + " is not running yet"); } - if (call.getTraceInfo() != null) { String serviceName = call.getService() != null ? call.getService().getDescriptorForType().getName() : ""; String methodName = (call.getMethod() != null) ? call.getMethod().getName() : ""; String traceString = serviceName + "." + methodName; - traceScope = Trace.startSpan(traceString, call.getTraceInfo()); - } - resultPair = this.rpcServer.call(call, this.status); + // Open the scope with try-with-resources so the span is closed on every exit path. + try (TraceScope traceScope = TraceUtil.createTrace(traceString)) { + // make the call + resultPair = this.rpcServer.call(call, this.status); + } } catch (TimeoutIOException e){
@@ -149,9 +145,6 @@ throw (Error)e; } } finally { - if (traceScope != null) { - traceScope.close(); - } RpcServer.CurCall.set(null); if (resultPair != null) { this.rpcServer.addCallSize(call.getSize() * -1);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
index 7fd4736491..f86fa774ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
@@ -181,7 +181,7 @@ public class NettyRpcServer extends RpcServer { Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime, int timeout) throws IOException { NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null, - -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null); + -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null); return call(fakeCall, status); } }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java
index 7dfdc7272e..70b9da3014 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.htrace.TraceInfo; /** * Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -40,9 +39,9 @@ class NettyServerCall extends ServerCall { NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size, - TraceInfo tinfo, InetAddress remoteAddress, long receiveTime, int timeout, + InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { - super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress, + super(id, service, md, header, param, cellScanner, connection, size, remoteAddress, receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup); }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
index 21c7f51905..a91aafb58e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.htrace.TraceInfo; /** * RpcConnection implementation for netty rpc server. @@ -119,9 +118,9 @@ class NettyServerRpcConnection extends ServerRpcConnection { @Override public NettyServerCall createCall(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, - long size, TraceInfo tinfo, final InetAddress remoteAddress, int timeout, + long size, final InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { - return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, tinfo, + return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, reqCleanup); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 3562d86fd3..51b168493d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.htrace.TraceInfo; /** * Interface of all necessary to carry out a RPC method invocation on the server. @@ -133,9 +132,4 @@ public interface RpcCall extends RpcCallContext { * @return A short string format of this call without possibly lengthy params */ String toShortString(); - - /** - * @return TraceInfo attached to this call. - */ - TraceInfo getTraceInfo(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index 2baba9ff60..8310f93d3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeade import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; -import org.apache.htrace.TraceInfo; /** * Datastructure that holds all necessary to a method invocation and then afterward, carries @@ -78,7 +77,6 @@ abstract class ServerCall implements RpcCall, Rpc protected final long size; // size of current call protected boolean isError; - protected final TraceInfo tinfo; protected ByteBufferListOutputStream cellBlockStream = null; protected CallCleanup reqCleanup = null; @@ -95,7 +93,7 @@ abstract class ServerCall implements RpcCall, Rpc @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", justification="Can't figure why this complaint is happening... 
see below") ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header, - Message param, CellScanner cellScanner, T connection, long size, TraceInfo tinfo, + Message param, CellScanner cellScanner, T connection, long size, InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) { this.id = id; @@ -109,7 +107,6 @@ abstract class ServerCall implements RpcCall, Rpc this.response = null; this.isError = false; this.size = size; - this.tinfo = tinfo; this.user = connection == null ? null : connection.user; // FindBugs: NP_NULL_ON_SOME_PATH this.remoteAddress = remoteAddress; this.retryImmediatelySupported = @@ -508,11 +505,6 @@ abstract class ServerCall implements RpcCall, Rpc } @Override - public TraceInfo getTraceInfo() { - return tinfo; - } - - @Override public synchronized BufferChain getResponse() { return response; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index e1ac74147a..4d0239f408 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -77,7 +77,6 @@ import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.htrace.TraceInfo; /** Reads calls from a connection and queues them for handling. */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( @@ -632,7 +631,7 @@ abstract class ServerRpcConnection implements Closeable { if ((totalRequestSize + this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) { final ServerCall callTooBig = createCall(id, this.service, null, null, null, null, - totalRequestSize, null, null, 0, this.callCleanup); + totalRequestSize, null, 0, this.callCleanup); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, "Call queue is full on " + this.rpcServer.server.getServerName() + @@ -694,21 +693,18 @@ abstract class ServerRpcConnection implements Closeable { } ServerCall readParamsFailedCall = createCall(id, this.service, null, null, null, null, - totalRequestSize, null, null, 0, this.callCleanup); + totalRequestSize, null, 0, this.callCleanup); readParamsFailedCall.setResponse(null, null, t, msg + "; " + t.getMessage()); readParamsFailedCall.sendResponseIfReady(); return; } - TraceInfo traceInfo = header.hasTraceInfo() ? 
new TraceInfo(header - .getTraceInfo().getTraceId(), header.getTraceInfo().getParentId()) - : null; int timeout = 0; if (header.hasTimeout() && header.getTimeout() > 0) { timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout()); } ServerCall call = createCall(id, this.service, md, header, param, cellScanner, totalRequestSize, - traceInfo, this.addr, timeout, this.callCleanup); + this.addr, timeout, this.callCleanup); if (!this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) { this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize()); @@ -790,7 +786,7 @@ abstract class ServerRpcConnection implements Closeable { public abstract boolean isConnectionOpen(); public abstract ServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo, + RequestHeader header, Message param, CellScanner cellScanner, long size, InetAddress remoteAddress, int timeout, CallCleanup reqCleanup); private static class ByteBuffByteInput extends ByteInput { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 69cc48d4d3..36ae74a060 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -489,7 +489,7 @@ public class SimpleRpcServer extends RpcServer { Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime, int timeout) throws IOException { SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner, - null, -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null); + null, -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null); return call(fakeCall, status); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java index 5a26c05b46..46295fda15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.htrace.TraceInfo; /** * Datastructure that holds all necessary to a method invocation and then afterward, carries the @@ -43,10 +42,10 @@ class SimpleServerCall extends ServerCall { justification = "Can't figure why this complaint is happening... 
see below") SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, - SimpleServerRpcConnection connection, long size, TraceInfo tinfo, + SimpleServerRpcConnection connection, long size, final InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, SimpleRpcServerResponder responder) { - super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress, + super(id, service, md, header, param, cellScanner, connection, size, remoteAddress, receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup); this.responder = responder; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index 68545f3c70..c8dfe4ab8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.util.Pair; -import org.apache.htrace.TraceInfo; /** Reads calls from a connection and queues them for handling. */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT", @@ -212,7 +211,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection { // Notify the client about the offending request SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null, - null, null, null, this, 0, null, this.addr, System.currentTimeMillis(), 0, + null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0, this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, null, responder); this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION); // Make sure the client recognizes the underlying exception @@ -343,9 +342,9 @@ class SimpleServerRpcConnection extends ServerRpcConnection { @Override public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md, - RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo, + RequestHeader header, Message param, CellScanner cellScanner, long size, InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { - return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, tinfo, + return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, reqCleanup, this.responder); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 86a24adbff..cc5c33fdc7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -193,8 +193,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.util.StringUtils; -import org.apache.htrace.Trace; -import 
org.apache.htrace.TraceScope; +import org.apache.hadoop.hbase.trace.TraceUtil; +import org.apache.htrace.core.TraceScope; @SuppressWarnings("deprecation") @InterfaceAudience.Private @@ -5361,16 +5361,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi RowLockContext rowLockContext = null; RowLockImpl result = null; - TraceScope traceScope = null; - - // If we're tracing start a span to show how long this took. - if (Trace.isTracing()) { - traceScope = Trace.startSpan("HRegion.getRowLock"); - traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock")); - } boolean success = false; - try { + try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) { + TraceUtil.addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock")); // Keep trying until we have a lock or error out. // TODO: do we need to add a time component here? while (result == null) { @@ -5396,9 +5390,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) { - if (traceScope != null) { - traceScope.getSpan().addTimelineAnnotation("Failed to get row lock"); - } + TraceUtil.addTimelineAnnotation("Failed to get row lock"); result = null; String message = "Timed out waiting for lock for row: " + rowKey + " in region " + getRegionInfo().getEncodedName(); @@ -5416,9 +5408,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi LOG.warn("Thread interrupted waiting for lock on row: " + rowKey); InterruptedIOException iie = new InterruptedIOException(); iie.initCause(ie); - if (traceScope != null) { - traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock"); - } + TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock"); Thread.currentThread().interrupt(); throw iie; } finally { @@ -5426,9 +5416,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (!success && rowLockContext != null) { rowLockContext.cleanUp(); } - if (traceScope != null) { - traceScope.close(); - } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 8fa686c71c..df16b30f75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -44,6 +44,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.regionserver.Region.FlushResult; @@ -56,8 +57,8 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.Tracer; +import org.apache.htrace.core.TraceScope; /** * Thread that flushes cache on request @@ -570,12 +571,10 @@ class MemStoreFlusher implements FlushRequester { * amount of memstore consumption. 
*/ public void reclaimMemStoreMemory() { - TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory"); + TraceScope scope = TraceUtil.createTrace("MemStoreFlusher.reclaimMemStoreMemory"); FlushType flushType = isAboveHighWaterMark(); if (flushType != FlushType.NORMAL) { - if (Trace.isTracing()) { - scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark."); - } + TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark."); long start = EnvironmentEdgeManager.currentTime(); synchronized (this.blockSignal) { boolean blocked = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 815710879a..75b4428eda 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -58,6 +58,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.trace.TraceUtil; +import org.apache.htrace.core.TraceScope; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; @@ -76,10 +78,7 @@ import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALProvider.WriterBase; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.util.StringUtils; -import org.apache.htrace.NullScope; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.Span; /** * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS. Only one @@ -680,8 +679,7 @@ public abstract class AbstractFSWAL implements WAL { * @throws IOException if there is a problem flushing or closing the underlying FS */ Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException { - TraceScope scope = Trace.startSpan("FSHFile.replaceWriter"); - try { + try (TraceScope scope = TraceUtil.createTrace("FSHLog.replaceWriter")) { long oldFileLen = doReplaceWriter(oldPath, newPath, nextWriter); int oldNumEntries = this.numEntries.getAndSet(0); final String newPathString = (null == newPath ? null : FSUtils.getPath(newPath)); @@ -695,8 +693,6 @@ public abstract class AbstractFSWAL implements WAL { LOG.info("New WAL " + newPathString); } return newPath; - } finally { - scope.close(); } } @@ -747,8 +743,7 @@ public abstract class AbstractFSWAL implements WAL { LOG.debug("WAL closing. Skipping rolling of writer"); return regionsToFlush; } - TraceScope scope = Trace.startSpan("FSHLog.rollWriter"); - try { + try (TraceScope scope = TraceUtil.createTrace("FSHLog.rollWriter")) { Path oldPath = getOldPath(); Path newPath = getNewPath(); // Any exception from here on is catastrophic, non-recoverable so we currently abort.
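Note on the helper used throughout the hunks above: the patch adds a new hbase-common class, org.apache.hadoop.hbase.trace.TraceUtil, whose body is not shown in this part of the diff. The sketch below is only a reconstruction inferred from the call sites visible here (createTrace, addTimelineAnnotation, wrap); the committed file also carries receiver and sampler plumbing. Because Java's try-with-resources silently skips a null resource, createTrace can return null when no tracer is configured and callers stay branch-free.

    // Hypothetical reconstruction of TraceUtil, inferred from its call
    // sites in this patch; not a copy of the committed file.
    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public final class TraceUtil {
      private static Tracer tracer; // initialized elsewhere from the HBase conf

      private TraceUtil() {
      }

      // Start a span; null when tracing is off, which try-with-resources accepts.
      public static TraceScope createTrace(String description) {
        return (tracer == null) ? null : tracer.newScope(description);
      }

      // Annotate the span currently attached to this thread, if any.
      public static void addTimelineAnnotation(String msg) {
        Span span = Tracer.getCurrentSpan();
        if (span != null) {
          span.addTimelineAnnotation(msg);
        }
      }

      // Carry the current trace context into a Runnable for another thread.
      public static Runnable wrap(Runnable runnable, String description) {
        return (tracer == null) ? runnable : tracer.wrap(runnable, description);
      }
    }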
@@ -768,8 +763,6 @@ public abstract class AbstractFSWAL implements WAL { } } finally { closeBarrier.endOp(); - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); } return regionsToFlush; } finally { @@ -948,7 +941,7 @@ public abstract class AbstractFSWAL implements WAL { if (timeInNanos > this.slowSyncNs) { String msg = new StringBuilder().append("Slow sync cost: ").append(timeInNanos / 1000000) .append(" ms, current pipeline: ").append(Arrays.toString(getPipeline())).toString(); - Trace.addTimelineAnnotation(msg); + TraceUtil.addTimelineAnnotation(msg); LOG.info(msg); } if (!listeners.isEmpty()) { @@ -964,16 +957,16 @@ public abstract class AbstractFSWAL implements WAL { if (this.closed) { throw new IOException("Cannot append; log is closed, regionName = " + hri.getRegionNameAsString()); } - TraceScope scope = Trace.startSpan(implClassName + ".append"); MutableLong txidHolder = new MutableLong(); MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> { txidHolder.setValue(ringBuffer.next()); }); long txid = txidHolder.longValue(); - try { + try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) { FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore); entry.stampRegionSequenceId(we); - ringBuffer.get(txid).load(entry, scope.detach()); + ringBuffer.get(txid).load(entry, scope.getSpan()); + scope.detach(); } finally { ringBuffer.publish(txid); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 42183ec1c3..0b59ced6f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -54,6 +54,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput; @@ -64,10 +65,8 @@ import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.ipc.RemoteException; -import org.apache.htrace.NullScope; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.TraceScope; /** * An asynchronous implementation of FSWAL. @@ -342,9 +341,10 @@ public class AsyncFSWAL extends AbstractFSWAL { } private void addTimeAnnotation(SyncFuture future, String annotation) { - TraceScope scope = Trace.continueSpan(future.getSpan()); - Trace.addTimelineAnnotation(annotation); - future.setSpan(scope.detach()); + TraceUtil.addTimelineAnnotation(annotation); + //TODO handle htrace API change + //future.setSpan(scope.getSpan()); + //scope.detach(); } private int finishSyncLowerThanTxid(long txid, boolean addSyncTrace) { @@ -415,14 +415,16 @@ public class AsyncFSWAL extends AbstractFSWAL { Span span = entry.detachSpan(); // the span maybe null if this is a retry after rolling. 
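The append path above hands its span across the disruptor ring buffer: scope.getSpan() extracts the live span and scope.detach() releases it from the creating thread so that closing the scope does not stop it. A minimal sketch of that hand-off, assuming (as the patch does) that the receiving side becomes responsible for finishing the span; startAndDetach and finishSpan are illustrative names, not patch code:

    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    // Producer: start a span and detach it for delivery to another thread.
    static Span startAndDetach(Tracer tracer, String description) {
      if (tracer == null) {
        return null;                   // tracing disabled
      }
      TraceScope scope = tracer.newScope(description);
      Span span = scope.getSpan();     // may be null if the sampler declined
      scope.detach();                  // the scope no longer owns the span
      return span;
    }

    // Consumer: whoever receives the detached span must eventually stop it.
    static void finishSpan(Span span) {
      if (span != null) {
        span.addTimelineAnnotation("append completed");
        span.stop();
      }
    }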
if (span != null) { - TraceScope scope = Trace.continueSpan(span); + //TODO handle htrace API change + //TraceScope scope = Trace.continueSpan(span); try { appended = append(writer, entry); } catch (IOException e) { throw new AssertionError("should not happen", e); } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); // append scope is complete + //TODO handle htrace API change + //assert scope == NullScope.INSTANCE || !scope.isDetached(); + //scope.close(); // append scope is complete } } else { try { @@ -559,12 +561,12 @@ public class AsyncFSWAL extends AbstractFSWAL { @Override public void sync() throws IOException { - TraceScope scope = Trace.startSpan("AsyncFSWAL.sync"); - try { + try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")){ long txid = waitingConsumePayloads.next(); SyncFuture future; try { - future = getSyncFuture(txid, scope.detach()); + future = getSyncFuture(txid, scope.getSpan()); + scope.detach(); RingBufferTruck truck = waitingConsumePayloads.get(txid); truck.load(future); } finally { @@ -573,10 +575,9 @@ public class AsyncFSWAL extends AbstractFSWAL { if (shouldScheduleConsumer()) { eventLoop.execute(consumer); } - scope = Trace.continueSpan(blockOnSync(future)); - } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); + //TODO handle htrace API change + //scope = Trace.continueSpan(blockOnSync(future)); + blockOnSync(future); } } @@ -585,13 +586,13 @@ public class AsyncFSWAL extends AbstractFSWAL { if (highestSyncedTxid.get() >= txid) { return; } - TraceScope scope = Trace.startSpan("AsyncFSWAL.sync"); - try { + try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")) { // here we do not use ring buffer sequence as txid long sequence = waitingConsumePayloads.next(); SyncFuture future; try { - future = getSyncFuture(txid, scope.detach()); + future = getSyncFuture(txid, scope.getSpan()); + scope.detach(); RingBufferTruck truck = waitingConsumePayloads.get(sequence); truck.load(future); } finally { @@ -600,10 +601,9 @@ public class AsyncFSWAL extends AbstractFSWAL { if (shouldScheduleConsumer()) { eventLoop.execute(consumer); } - scope = Trace.continueSpan(blockOnSync(future)); - } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); + //TODO handle htrace API change + //scope = Trace.continueSpan(blockOnSync(future)); + blockOnSync(future); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 7e0fc37411..040a2ebab4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -61,10 +62,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.htrace.NullScope; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import 
org.apache.htrace.TraceScope; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.TraceScope; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; @@ -345,7 +344,7 @@ public class FSHLog extends AbstractFSWAL { // use assert to make sure no change breaks the logic that // sequence and zigzagLatch will be set together assert sequence > 0L : "Failed to get sequence from ring buffer"; - Trace.addTimelineAnnotation("awaiting safepoint"); + TraceUtil.addTimelineAnnotation("awaiting safepoint"); syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer(sequence)); } } catch (FailedSyncBeforeLogCloseException e) { @@ -361,9 +360,9 @@ public class FSHLog extends AbstractFSWAL { if (this.writer != null) { oldFileLen = this.writer.getLength(); try { - Trace.addTimelineAnnotation("closing writer"); + TraceUtil.addTimelineAnnotation("closing writer"); this.writer.close(); - Trace.addTimelineAnnotation("writer closed"); + TraceUtil.addTimelineAnnotation("writer closed"); this.closeErrorCount.set(0); } catch (IOException ioe) { int errors = closeErrorCount.incrementAndGet(); @@ -595,13 +594,14 @@ public class FSHLog extends AbstractFSWAL { } // I got something. Lets run. Save off current sequence number in case it changes // while we run. - TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan()); + //TODO handle htrace API change + //TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan()); long start = System.nanoTime(); Throwable lastException = null; try { - Trace.addTimelineAnnotation("syncing writer"); + TraceUtil.addTimelineAnnotation("syncing writer"); writer.sync(); - Trace.addTimelineAnnotation("writer synced"); + TraceUtil.addTimelineAnnotation("writer synced"); currentSequence = updateHighestSyncedSequence(currentSequence); } catch (IOException e) { LOG.error("Error syncing, request close of WAL", e); @@ -611,7 +611,9 @@ public class FSHLog extends AbstractFSWAL { lastException = e; } finally { // reattach the span to the future before releasing. - takeSyncFuture.setSpan(scope.detach()); + //TODO handle htrace API change + //takeSyncFuture.setSpan(scope.getSpan()); + //scope.detach(); // First release what we 'took' from the queue. syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException); // Can we release other syncs? @@ -754,12 +756,9 @@ public class FSHLog extends AbstractFSWAL { @Override public void sync() throws IOException { - TraceScope scope = Trace.startSpan("FSHLog.sync"); - try { - scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach())); - } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); + try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) { + publishSyncThenBlockOnCompletion(scope.getSpan()); + scope.detach(); } } @@ -769,12 +768,9 @@ public class FSHLog extends AbstractFSWAL { // Already sync'd. 
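The commented-out blocks above are left as TODOs because htrace 4 removed Trace.continueSpan(): a bare Span that crossed threads can no longer be re-wrapped in a scope on the receiving thread. One plausible way the TODOs could later be resolved, sketched here purely as an assumption and not something this patch does, is to open a child scope parented on the detached span's id:

    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    // Hypothetical replacement for the old continueSpan() sites: time the
    // sync work in a child scope of the span that rode the ring buffer.
    static void syncUnderChildScope(Tracer tracer, Span detached) {
      TraceScope scope = (tracer == null || detached == null)
          ? null : tracer.newScope("FSHLog.syncRunner", detached.getSpanId());
      try (TraceScope s = scope) {
        if (s != null) {
          s.addTimelineAnnotation("syncing writer");
        }
        // ... writer.sync() runs here whether or not tracing is on ...
      }
    }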
return; } - TraceScope scope = Trace.startSpan("FSHLog.sync"); - try { - scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach())); - } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); + try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) { + publishSyncThenBlockOnCompletion(scope.getSpan()); + scope.detach(); } } @@ -992,7 +988,8 @@ public class FSHLog extends AbstractFSWAL { } } else if (truck.type() == RingBufferTruck.Type.APPEND) { FSWALEntry entry = truck.unloadAppend(); - TraceScope scope = Trace.continueSpan(entry.detachSpan()); + //TODO handle htrace API change + //TraceScope scope = Trace.continueSpan(entry.detachSpan()); try { if (this.exception != null) { @@ -1011,9 +1008,6 @@ public class FSHLog extends AbstractFSWAL { : new DamagedWALException("On sync", this.exception)); // Return to keep processing events coming off the ringbuffer return; - } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); // append scope is complete } } else { // What is this if not an append or sync. Fail all up to this!!! diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index 7c46976fd3..e28d1edb41 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.util.CollectionUtils; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; -import org.apache.htrace.Span; +import org.apache.htrace.core.Span; /** * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java index a63b281929..a833b326b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.regionserver.wal; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.htrace.Span; +import org.apache.htrace.core.Span; /** * A 'truck' to carry a payload across the ring buffer from Handler to WAL. Has EITHER a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java index 13d103bd06..0dbd0208e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java @@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; -import org.apache.htrace.Span; +import org.apache.htrace.core.Span; /** * A Future on a filesystem sync call. 
It given to a client or 'Handler' for it to wait on till the diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index b4f93c5a9a..17da9b8c12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -508,7 +508,7 @@ public class TestSimpleRpcScheduler { ServerCall putCall = new ServerCall(1, null, null, RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(), RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))), - null, null, 9, null, null, timestamp, 0, null, null, null) { + null, null, 9, null, timestamp, 0, null, null, null) { @Override public void sendResponseIfReady() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java index 63bcbdf852..7cbd3dc269 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java @@ -21,9 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import java.lang.reflect.Method; -import java.util.Collection; - import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; @@ -31,12 +28,11 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.htrace.Sampler; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; -import org.apache.htrace.TraceTree; -import org.apache.htrace.impl.POJOSpanReceiver; +import org.apache.htrace.core.MilliSpan; +import org.apache.htrace.core.POJOSpanReceiver; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.SpanId; +import org.apache.htrace.core.TraceScope; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -44,13 +40,22 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.TreeSet; + @Category({MiscTests.class, MediumTests.class}) public class TestHTraceHooks { private static final byte[] FAMILY_BYTES = "family".getBytes(); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static POJOSpanReceiver rcvr; - private static long ROOT_SPAN_ID = 0; + private static SpanId ROOT_SPAN_ID = new SpanId(0, 0); @Rule public TestName name = new TestName(); @@ -58,35 +63,23 @@ public class TestHTraceHooks { @BeforeClass public static void before() throws Exception { - // Find out what the right value to use fo SPAN_ROOT_ID after HTRACE-111. We use HTRACE-32 - // to find out to detect if we are using HTrace 3.2 or not. 
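The reflection probe being deleted here existed only to guess the right root-span sentinel across HTrace 3.x releases. In htrace-core4, span identifiers are 128-bit SpanId values built from two longs, so the test can use a fixed new SpanId(0, 0) and drop the probe entirely. A small illustration of the new id type (illustrative only, not patch code):

    import org.apache.htrace.core.SpanId;

    public class SpanIdDemo {
      public static void main(String[] args) {
        SpanId root = new SpanId(0, 0);              // sentinel used by the test
        SpanId child = new SpanId(0x1234L, 0x5678L); // a (high, low) long pair
        System.out.println(child + " high=" + child.getHigh()
            + " low=" + child.getLow());
        System.out.println(child.equals(root));      // false
      }
    }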
- try { - Method m = Span.class.getMethod("addKVAnnotation", String.class, String.class); - } catch (NoSuchMethodException e) { - ROOT_SPAN_ID = 0x74aceL; // Span.SPAN_ROOT_ID pre HTrace-3.2 - } - TEST_UTIL.startMiniCluster(2, 3); rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration())); - Trace.addReceiver(rcvr); + TraceUtil.addReceiver(rcvr); } @AfterClass public static void after() throws Exception { TEST_UTIL.shutdownMiniCluster(); - Trace.removeReceiver(rcvr); + TraceUtil.removeReceiver(rcvr); rcvr = null; } @Test public void testTraceCreateTable() throws Exception { - TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS); Table table; - try { - + try (TraceScope scope = TraceUtil.createTrace("creating table")) { table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY_BYTES); - } finally { - tableCreationSpan.close(); } // Some table creation is async. Need to make sure that everything is full in before @@ -100,7 +93,7 @@ public class TestHTraceHooks { Collection spans = rcvr.getSpans(); TraceTree traceTree = new TraceTree(spans); - Collection roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID); + List roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID); assertEquals(1, roots.size()); Span createTableRoot = roots.iterator().next(); @@ -122,11 +115,8 @@ public class TestHTraceHooks { Put put = new Put("row".getBytes()); put.addColumn(FAMILY_BYTES, "col".getBytes(), "value".getBytes()); - TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS); - try { + try (TraceScope putSpan = TraceUtil.createTrace("doing put")) { table.put(put); - } finally { - putSpan.close(); } spans = rcvr.getSpans(); @@ -144,3 +134,118 @@ public class TestHTraceHooks { assertNotNull(putRoot); } } + +/** + * Used to create the graph formed by spans. + */ +class TraceTree { + + public static class SpansByParent { + private final TreeSet treeSet; + + private final HashMap> parentToSpans; + + SpansByParent(Collection spans) { + TreeSet treeSet = new TreeSet(); + parentToSpans = new HashMap>(); + for (Span span : spans) { + treeSet.add(span); + for (SpanId parent : span.getParents()) { + LinkedList list = parentToSpans.get(parent); + if (list == null) { + list = new LinkedList(); + parentToSpans.put(parent, list); + } + list.add(span); + } + if (span.getParents().length == 0) { + LinkedList list = parentToSpans.get(Long.valueOf(0L)); + if (list == null) { + list = new LinkedList(); + parentToSpans.put(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE), list); + } + list.add(span); + } + } + this.treeSet = treeSet; + } + + public List find(SpanId parentId) { + LinkedList spans = parentToSpans.get(parentId); + if (spans == null) { + return new LinkedList(); + } + return spans; + } + + public Iterator iterator() { + return Collections.unmodifiableSortedSet(treeSet).iterator(); + } + } + + public static class SpansByProcessId { + private final TreeSet treeSet; + + SpansByProcessId(Collection spans) { + TreeSet treeSet = new TreeSet(); + for (Span span : spans) { + treeSet.add(span); + } + this.treeSet = treeSet; + } + + public List find(String processId) { + List spans = new ArrayList(); + Span span = new MilliSpan.Builder(). + tracerId(Long.toString(Long.MIN_VALUE)). + spanId(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE)). 
+ build(); + while (true) { + span = treeSet.higher(span); + if (span == null) { + break; + } + spans.add(span); + } + return spans; + } + + public Iterator iterator() { + return Collections.unmodifiableSortedSet(treeSet).iterator(); + } + } + + private final SpansByParent spansByParent; + private final SpansByProcessId spansByProcessId; + + /** + * Create a new TraceTree + * + * @param spans The collection of spans to use to create this TraceTree. Should + * have at least one root span. + */ + public TraceTree(Collection spans) { + this.spansByParent = new SpansByParent(spans); + this.spansByProcessId = new SpansByProcessId(spans); + } + + public SpansByParent getSpansByParent() { + return spansByParent; + } + + public SpansByProcessId getSpansByProcessId() { + return spansByProcessId; + } + + @Override + public String toString() { + StringBuilder bld = new StringBuilder(); + String prefix = ""; + for (Iterator iter = spansByParent.iterator(); iter.hasNext();) { + Span span = iter.next(); + bld.append(prefix).append(span.toString()); + prefix = "\n"; + } + return bld.toString(); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java index 02042302e9..eff3248e86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -59,10 +60,10 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.htrace.Sampler; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; -import org.apache.htrace.impl.ProbabilitySampler; +import org.apache.htrace.core.Sampler; +import org.apache.htrace.core.Tracer; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.ProbabilitySampler; import com.codahale.metrics.ConsoleReporter; import com.codahale.metrics.Histogram; @@ -171,15 +172,13 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { Random rand = new Random(Thread.currentThread().getId()); WAL wal = region.getWAL(); - TraceScope threadScope = - Trace.startSpan("WALPerfEval." + Thread.currentThread().getName()); - try { + try (TraceScope threadScope = TraceUtil.createTrace("WALPerfEval." 
+ Thread.currentThread().getName())) { long startTime = System.currentTimeMillis(); int lastSync = 0; for (int i = 0; i < numIterations; ++i) { - assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected."; - TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler); - try { + assert Tracer.getCurrentSpan() == threadScope.getSpan() : "Span leak detected."; + try (TraceScope loopScope = TraceUtil.createTrace("runLoopIter" + i)) { + TraceUtil.addSampler(loopSampler); long now = System.nanoTime(); Put put = setupPut(rand, key, value, numFamilies); WALEdit walEdit = new WALEdit(); @@ -195,16 +194,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { } } latencyHistogram.update(System.nanoTime() - now); - } finally { - loopScope.close(); } } long totalTime = (System.currentTimeMillis() - startTime); logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime); } catch (Exception e) { LOG.error(getClass().getSimpleName() + " Thread failed", e); - } finally { - threadScope.close(); } } } @@ -314,8 +309,9 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { LOG.info("FileSystem: " + fs); SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null; - final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER; - TraceScope scope = Trace.startSpan("WALPerfEval", sampler); + final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER; + TraceScope scope = TraceUtil.createTrace("WALPerfEval"); + TraceUtil.addSampler(sampler); try { if (rootRegionDir == null) { @@ -337,8 +333,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { // a table per desired region means we can avoid carving up the key space final HTableDescriptor htd = createHTableDescriptor(i, numFamilies); regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller); - benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync, - syncInterval, traceFreq)); + benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync, + syncInterval, traceFreq), ""); } ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics). outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build(); diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index 19ec0c2a65..ffd35d47ae 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -310,6 +310,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop @@ -358,6 +364,10 @@ hadoop-hdfs + org.apache.htrace + htrace-core + + com.google.guava guava @@ -370,6 +380,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava @@ -381,6 +395,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava @@ -434,12 +452,22 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop hadoop-minicluster + org.apache.htrace + htrace-core + + com.google.guava guava diff --git a/hbase-shell/src/main/ruby/shell/commands/trace.rb b/hbase-shell/src/main/ruby/shell/commands/trace.rb index 5ecd28cd0d..b0350be2ef 100644 --- a/hbase-shell/src/main/ruby/shell/commands/trace.rb +++ b/hbase-shell/src/main/ruby/shell/commands/trace.rb @@ -16,8 +16,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
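WALPerformanceEvaluation above now registers its sampler through TraceUtil.addSampler(...) instead of passing one to each Trace.startSpan(...) call. Sampler.ALWAYS and Sampler.NEVER survive the move to org.apache.htrace.core, while ProbabilitySampler is now built from configuration. A hedged sketch of constructing one; the key/value wiring reflects the usual htrace 4 pattern and should be checked against the version in use:

    import org.apache.htrace.core.HTraceConfiguration;
    import org.apache.htrace.core.ProbabilitySampler;
    import org.apache.htrace.core.Sampler;

    // A sampler that keeps roughly 10% of requests; "sampler.fraction" is
    // the configuration key ProbabilitySampler reads in htrace-core4.
    Sampler oneInTen = new ProbabilitySampler(
        HTraceConfiguration.fromKeyValuePairs("sampler.fraction", "0.10"));
    boolean sampled = oneInTen.next();   // true for about one call in ten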
# -HTrace = org.apache.htrace.Trace -java_import org.apache.htrace.Sampler +HTrace = org.apache.htrace.core.Tracer +java_import org.apache.htrace.core.Sampler java_import org.apache.hadoop.hbase.trace.SpanReceiverHost module Shell diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml index ce7313a029..d39e9672aa 100644 --- a/hbase-spark/pom.xml +++ b/hbase-spark/pom.xml @@ -160,6 +160,10 @@ ${hadoop-two.version} + org.apache.htrace + htrace-core + + log4j log4j @@ -197,6 +201,10 @@ test + org.apache.htrace + htrace-core + + log4j log4j @@ -234,6 +242,10 @@ test + org.apache.htrace + htrace-core + + log4j log4j diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml index f55ee95c40..b488d9498c 100644 --- a/hbase-testing-util/pom.xml +++ b/hbase-testing-util/pom.xml @@ -140,6 +140,12 @@ org.apache.hadoop hadoop-common compile + + + org.apache.htrace + htrace-core + + org.apache.hadoop @@ -185,6 +191,10 @@ compile + org.apache.htrace + htrace-core + + com.google.guava guava @@ -197,6 +207,10 @@ compile + org.apache.htrace + htrace-core + + com.google.guava guava @@ -208,6 +222,10 @@ compile + org.apache.htrace + htrace-core + + com.google.guava guava @@ -239,11 +257,23 @@ org.apache.hadoop hadoop-common compile + + + org.apache.htrace + htrace-core + + org.apache.hadoop hadoop-minicluster compile + + + org.apache.htrace + htrace-core + + org.apache.hadoop diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml index 0dc0dde64b..6457b33d38 100644 --- a/hbase-thrift/pom.xml +++ b/hbase-thrift/pom.xml @@ -513,6 +513,12 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop @@ -520,6 +526,10 @@ test + org.apache.htrace + htrace-core + + com.google.guava guava @@ -571,10 +581,22 @@ org.apache.hadoop hadoop-common + + + org.apache.htrace + htrace-core + + org.apache.hadoop hadoop-minicluster + + + org.apache.htrace + htrace-core + + diff --git a/pom.xml b/pom.xml index a684f3c749..78758b53e6 100755 --- a/pom.xml +++ b/pom.xml @@ -1413,7 +1413,7 @@ 9.1.10.0 4.12 1.3 - 3.2.0-incubating + 4.2.0-incubating 1.2.17 1.10.19 @@ -2023,7 +2023,7 @@ org.apache.htrace - htrace-core + htrace-core4 ${htrace.version} @@ -2424,6 +2424,10 @@ hadoop-hdfs + org.apache.htrace + htrace-core + + javax.servlet.jsp jsp-api @@ -2454,6 +2458,10 @@ test + org.apache.htrace + htrace-core + + javax.servlet.jsp jsp-api @@ -2486,6 +2494,10 @@ ${hadoop-two.version} + org.apache.htrace + htrace-core + + commons-beanutils commons-beanutils @@ -2527,10 +2539,14 @@ hadoop-minicluster ${hadoop-two.version} - - commons-httpclient - commons-httpclient - + + org.apache.htrace + htrace-core + + + commons-httpclient + commons-httpclient + javax.servlet.jsp jsp-api @@ -2630,6 +2646,10 @@ hadoop-hdfs + org.apache.htrace + htrace-core + + javax.servlet.jsp jsp-api @@ -2656,6 +2676,10 @@ test + org.apache.htrace + htrace-core + + javax.servlet.jsp jsp-api @@ -2689,10 +2713,14 @@ hadoop-common ${hadoop-three.version} - - commons-beanutils - commons-beanutils - + + org.apache.htrace + htrace-core + + + commons-beanutils + commons-beanutils + javax.servlet.jsp jsp-api @@ -2736,10 +2764,14 @@ hadoop-minicluster ${hadoop-three.version} - - commons-httpclient - commons-httpclient - + + org.apache.htrace + htrace-core + + + commons-httpclient + commons-httpclient + javax.servlet.jsp jsp-api diff --git a/src/main/asciidoc/_chapters/tracing.adoc b/src/main/asciidoc/_chapters/tracing.adoc index 0cddd8a522..2982832cfa 100644 --- a/src/main/asciidoc/_chapters/tracing.adoc +++ 
b/src/main/asciidoc/_chapters/tracing.adoc @@ -57,7 +57,7 @@ The `LocalFileSpanReceiver` looks in _hbase-site.xml_ for a `hbase.local-fi hbase.trace.spanreceiver.classes - org.apache.htrace.impl.LocalFileSpanReceiver + org.apache.htrace.core.LocalFileSpanReceiver hbase.htrace.local-file-span-receiver.path @@ -76,7 +76,7 @@ _htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7 hbase.trace.spanreceiver.classes - org.apache.htrace.impl.ZipkinSpanReceiver + org.apache.htrace.core.ZipkinSpanReceiver hbase.htrace.zipkin.collector-hostname -- 2.11.0 (Apple Git-81)
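For reference, the tracing.adoc hunks above only rename the receiver classes from org.apache.htrace.impl to org.apache.htrace.core; the programming model behind the whole patch looks roughly like the sketch below. It assumes standard htrace 4 usage (Tracer.Builder plus the sampler.classes, span.receiver.classes, and local file path keys); it is not code from the patch, and the literal key names should be verified against the htrace 4.2 javadoc.

    import org.apache.htrace.core.HTraceConfiguration;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public class TraceDemo {
      public static void main(String[] args) throws Exception {
        Tracer tracer = new Tracer.Builder("TraceDemo")
            .conf(HTraceConfiguration.fromKeyValuePairs(
                "sampler.classes", "AlwaysSampler",
                "span.receiver.classes",
                    "org.apache.htrace.core.LocalFileSpanReceiver",
                "local.file.span.receiver.path", "/tmp/spans.json"))
            .build();
        try (TraceScope scope = tracer.newScope("demo")) {
          scope.addTimelineAnnotation("doing work");
        }                  // span stops and is handed to the receiver here
        tracer.close();    // flush receivers on shutdown
      }
    }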