diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 96ed184..ed2a7da 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.htrace.Trace; +import org.apache.htrace.core.Tracer; import com.google.common.annotations.VisibleForTesting; @@ -1008,8 +1008,11 @@ class AsyncProcess { int numAttempt) { // no stats to manage, just do the standard action if (AsyncProcess.this.connection.getStatisticsTracker() == null) { - return Collections.singletonList(Trace.wrap("AsyncProcess.sendMultiAction", - new SingleServerRequestRunnable(multiAction, numAttempt, server, callsInProgress))); + Tracer tracer = Tracer.curThreadTracer(); + Runnable runnable = new SingleServerRequestRunnable(multiAction, numAttempt, server, + callsInProgress); + if (tracer != null) runnable = tracer.wrap(runnable, "AsyncProcess.sendMultiAction"); + return Collections.singletonList(runnable); } // group the actions by the amount of delay @@ -1039,8 +1042,8 @@ class AsyncProcess { traceText = "AsyncProcess.clientBackoff.sendMultiAction"; runnable = runner; } - runnable = Trace.wrap(traceText, runnable); - toReturn.add(runnable); + Tracer tracer = Tracer.curThreadTracer(); + toReturn.add(tracer != null? tracer.wrap(runnable, traceText): runnable); } return toReturn; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java index eacbe2d..7dc0541 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java @@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.htrace.Trace; +import org.apache.htrace.core.Tracer; /** * A completion service for the RpcRetryingCallerFactory. @@ -139,7 +139,8 @@ public class ResultBoundedCompletionService { public void submit(RetryingCallable task, int callTimeout, int id) { QueueingFuture newFuture = new QueueingFuture(task, callTimeout); - executor.execute(Trace.wrap(newFuture)); + Tracer tracer = Tracer.curThreadTracer(); + executor.execute(tracer != null? 
tracer.wrap(newFuture, "submit"): newFuture); tasks[id] = newFuture; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java index 43d75f9..43ffe82 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java @@ -17,18 +17,6 @@ */ package org.apache.hadoop.hbase.ipc; -import io.netty.bootstrap.Bootstrap; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufOutputStream; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import io.netty.util.Timeout; -import io.netty.util.TimerTask; -import io.netty.util.concurrent.GenericFutureListener; -import io.netty.util.concurrent.Promise; - import java.io.IOException; import java.net.ConnectException; import java.net.InetSocketAddress; @@ -68,13 +56,25 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; +import org.apache.htrace.core.SpanId; +import org.apache.htrace.core.Tracer; import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufOutputStream; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.netty.util.Timeout; +import io.netty.util.TimerTask; +import io.netty.util.concurrent.GenericFutureListener; +import io.netty.util.concurrent.Promise; + /** * Netty RPC channel */ @@ -406,10 +406,14 @@ public class AsyncRpcChannel { requestHeaderBuilder.setCallId(call.id) .setMethodName(call.method.getName()).setRequestParam(call.param != null); - if (Trace.isTracing()) { - Span s = Trace.currentSpan(); + SpanId spanId = Tracer.getCurrentSpanId(); + if (spanId.isValid()) { + // Pre-4.0.0 htrace, we had parent and trace id. In 4.0.0, there is one id only and it is + // 128 bits. Rather than add new fields or change field names, just set the high and low of + // the 128 integer into the old parent id and trace id fields. requestHeaderBuilder.setTraceInfo(TracingProtos.RPCTInfo.newBuilder(). - setParentId(s.getSpanId()).setTraceId(s.getTraceId())); + setParentId(spanId.getHigh()). 
+ setTraceId(spanId.getLow())); } ByteBuffer cellBlock = client.buildCellBlock(call.controller.cellScanner()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java index b09674c..bcedaf5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java @@ -19,10 +19,36 @@ package org.apache.hadoop.hbase.ipc; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.Message.Builder; -import com.google.protobuf.RpcCallback; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.Closeable; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InterruptedIOException; +import java.io.OutputStream; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.net.SocketFactory; +import javax.security.sasl.SaslException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -64,9 +90,9 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.SpanId; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import javax.net.SocketFactory; import javax.security.sasl.SaslException; @@ -99,6 +125,10 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.Message.Builder; +import com.google.protobuf.RpcCallback; /** * Does RPC against a cluster. Manages connections per regionserver in the cluster. 
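Throughout this patch the static htrace 3 calls (Trace.wrap, Trace.startSpan) become instance calls on org.apache.htrace.core.Tracer, and Tracer.curThreadTracer() returns null when no Tracer has been built for the current thread, which is why every converted call site grows a null guard. A minimal sketch of the recurring pattern, assuming only the htrace 4 core API (the maybeWrap helper name is illustrative, not part of the patch):

  // Null-safe wrapping under htrace 4: there is no static Trace.wrap() any more,
  // and curThreadTracer() is null when tracing is not configured.
  static Runnable maybeWrap(Runnable r, String description) {
    Tracer tracer = Tracer.curThreadTracer();
    return tracer == null ? r : tracer.wrap(r, description);
  }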
@@ -142,15 +172,15 @@ public class RpcClientImpl extends AbstractRpcClient { private static class CallFuture { final Call call; final int priority; - final Span span; + final TraceScope traceScope; // We will use this to stop the writer final static CallFuture DEATH_PILL = new CallFuture(null, -1, null); - CallFuture(Call call, int priority, Span span) { + CallFuture(Call call, int priority, TraceScope traceScope) { this.call = call; this.priority = priority; - this.span = span; + this.traceScope = traceScope; } } @@ -204,9 +234,9 @@ public class RpcClientImpl extends AbstractRpcClient { protected final BlockingQueue callsToWrite; - public CallFuture sendCall(Call call, int priority, Span span) + public CallFuture sendCall(Call call, int priority, TraceScope traceScope) throws InterruptedException, IOException { - CallFuture cts = new CallFuture(call, priority, span); + CallFuture cts = new CallFuture(call, priority, traceScope); if (!callsToWrite.offer(cts)) { throw new IOException("Can't add the call " + call.id + " to the write queue. callsToWrite.size()=" + callsToWrite.size()); @@ -267,7 +297,7 @@ public class RpcClientImpl extends AbstractRpcClient { } try { - Connection.this.tracedWriteRequest(cts.call, cts.priority, cts.span); + Connection.this.tracedWriteRequest(cts.call, cts.priority, cts.traceScope); } catch (IOException e) { if (LOG.isDebugEnabled()) { LOG.debug("call write error for call #" + cts.call.id @@ -863,12 +893,12 @@ public class RpcClientImpl extends AbstractRpcClient { } } - protected void tracedWriteRequest(Call call, int priority, Span span) throws IOException { - TraceScope ts = Trace.continueSpan(span); + protected void tracedWriteRequest(Call call, int priority, TraceScope traceScope) + throws IOException { try { - writeRequest(call, priority, span); + writeRequest(call, priority, traceScope); } finally { - ts.close(); + // traceScope is null when tracing is off; guard the close. + if (traceScope != null) traceScope.close(); } } @@ -878,12 +908,17 @@ public class RpcClientImpl extends AbstractRpcClient { * threads. * @see #readResponse() */ - private void writeRequest(Call call, final int priority, Span span) throws IOException { + private void writeRequest(Call call, final int priority, TraceScope traceScope) + throws IOException { RequestHeader.Builder builder = RequestHeader.newBuilder(); builder.setCallId(call.id); - if (span != null) { - builder.setTraceInfo( - RPCTInfo.newBuilder().setParentId(span.getSpanId()).setTraceId(span.getTraceId())); + SpanId spanId = Tracer.getCurrentSpanId(); + if (spanId.isValid()) { + // Pre-4.0.0 htrace, we had parent and trace id. In 4.0.0, there is one id only and it is + // 128 bits. Rather than add new fields or change field names, just set the high and low of + // the 128-bit integer into the old parent id and trace id fields. + builder.setTraceInfo(RPCTInfo.newBuilder(). + setParentId(spanId.getHigh()).setTraceId(spanId.getLow())); } builder.setMethodName(call.md.getName()); builder.setRequestParam(call.param != null); @@ -1186,8 +1221,12 @@ public class RpcClientImpl extends AbstractRpcClient { final Connection connection = getConnection(ticket, call, addr); final CallFuture cts; + Tracer tracer = Tracer.curThreadTracer(); if (connection.callSender != null) { - cts = connection.callSender.sendCall(call, pcrc.getPriority(), Trace.currentSpan()); + // TODO: This used to be Trace.currentSpan(). Instead we make a newScope. Is that right? + cts = connection.callSender. + sendCall(call, pcrc.getPriority(), + tracer == null?
null: tracer.newScope("call " + connection.callSender.getName())); pcrc.notifyOnCancel(new RpcCallback() { @Override public void run(Object parameter) { @@ -1201,7 +1240,8 @@ public class RpcClientImpl extends AbstractRpcClient { } } else { cts = null; - connection.tracedWriteRequest(call, pcrc.getPriority(), Trace.currentSpan()); + connection.tracedWriteRequest(call, pcrc.getPriority(), + tracer == null? null: tracer.newScope("call")); } while (!call.done) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index c1eb214..f5d2d83 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.RetryCounterFactory; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; @@ -44,8 +46,6 @@ import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.proto.CreateRequest; import org.apache.zookeeper.proto.SetDataRequest; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; /** * A zookeeper that can handle 'recoverable' errors. @@ -100,8 +100,7 @@ public class RecoverableZooKeeper { public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, int retryIntervalMillis) throws IOException { - this(quorumServers, sessionTimeout, watcher, maxRetries, retryIntervalMillis, - null); + this(quorumServers, sessionTimeout, watcher, maxRetries, retryIntervalMillis, null); } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", @@ -168,9 +167,9 @@ public class RecoverableZooKeeper { */ public void delete(String path, int version) throws InterruptedException, KeeperException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.delete"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.delete"); RetryCounter retryCounter = retryCounterFactory.create(); boolean isRetry = false; // False for first attempt, true for all retries. while (true) { @@ -212,9 +211,9 @@ public class RecoverableZooKeeper { */ public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.exists"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.exists"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -244,9 +243,9 @@ public class RecoverableZooKeeper { */ public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? 
null: tracer.newScope("RecoverableZookeeper.exists"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.exists"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -285,10 +284,11 @@ public class RecoverableZooKeeper { * @return List of children znodes */ public List getChildren(String path, Watcher watcher) - throws KeeperException, InterruptedException { - TraceScope traceScope = null; + throws KeeperException, InterruptedException { + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = + tracer == null? null: tracer.newScope("RecoverableZookeeper.getChildren"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.getChildren"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -318,9 +318,10 @@ public class RecoverableZooKeeper { */ public List getChildren(String path, boolean watch) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = + tracer == null? null: tracer.newScope("RecoverableZookeeper.getChildren"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.getChildren"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -350,9 +351,9 @@ public class RecoverableZooKeeper { */ public byte[] getData(String path, Watcher watcher, Stat stat) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.getData"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.getData"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -383,9 +384,9 @@ public class RecoverableZooKeeper { */ public byte[] getData(String path, boolean watch, Stat stat) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.getData"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.getData"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -418,9 +419,9 @@ public class RecoverableZooKeeper { */ public Stat setData(String path, byte[] data, int version) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.setData"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.setData"); RetryCounter retryCounter = retryCounterFactory.create(); byte[] newData = appendMetaData(data); boolean isRetry = false; @@ -468,9 +469,9 @@ public class RecoverableZooKeeper { */ public List getAcl(String path, Stat stat) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.getAcl"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.getAcl"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -500,9 +501,9 @@ public class RecoverableZooKeeper { */ public Stat setAcl(String path, List acls, int version) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? 
null: tracer.newScope("RecoverableZookeeper.setAcl"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.setAcl"); RetryCounter retryCounter = retryCounterFactory.create(); while (true) { try { @@ -544,9 +545,9 @@ public class RecoverableZooKeeper { public String create(String path, byte[] data, List acl, CreateMode createMode) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.create"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.create"); byte[] newData = appendMetaData(data); switch (createMode) { case EPHEMERAL: @@ -674,9 +675,9 @@ public class RecoverableZooKeeper { */ public List multi(Iterable ops) throws KeeperException, InterruptedException { - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: tracer.newScope("RecoverableZookeeper.multi"); try { - traceScope = Trace.startSpan("RecoverableZookeeper.multi"); RetryCounter retryCounter = retryCounterFactory.create(); Iterable multiOps = prepareZKMulti(ops); while (true) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java index 56de264..0f09364 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.trace; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.htrace.HTraceConfiguration; +import org.apache.htrace.core.HTraceConfiguration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -65,17 +65,16 @@ public class HBaseHTraceConfiguration extends HTraceConfiguration { @Override public String get(String key) { - return conf.get(KEY_PREFIX +key); + return conf.get(KEY_PREFIX + key); } @Override public String get(String key, String defaultValue) { - return conf.get(KEY_PREFIX + key,defaultValue); - + return conf.get(KEY_PREFIX + key, defaultValue); } @Override public boolean getBoolean(String key, boolean defaultValue) { return conf.getBoolean(KEY_PREFIX + key, defaultValue); } -} +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java deleted file mode 100644 index b90d191..0000000 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.trace; - -import java.io.IOException; -import java.util.Collection; -import java.util.HashSet; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.htrace.SpanReceiver; -import org.apache.htrace.SpanReceiverBuilder; -import org.apache.htrace.Trace; - -/** - * This class provides functions for reading the names of SpanReceivers from - * hbase-site.xml, adding those SpanReceivers to the Tracer, and closing those - * SpanReceivers when appropriate. - */ -@InterfaceAudience.Private -public class SpanReceiverHost { - public static final String SPAN_RECEIVERS_CONF_KEY = "hbase.trace.spanreceiver.classes"; - private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class); - private Collection receivers; - private Configuration conf; - private boolean closed = false; - - private static enum SingletonHolder { - INSTANCE; - Object lock = new Object(); - SpanReceiverHost host = null; - } - - public static SpanReceiverHost getInstance(Configuration conf) { - synchronized (SingletonHolder.INSTANCE.lock) { - if (SingletonHolder.INSTANCE.host != null) { - return SingletonHolder.INSTANCE.host; - } - - SpanReceiverHost host = new SpanReceiverHost(conf); - host.loadSpanReceivers(); - SingletonHolder.INSTANCE.host = host; - return SingletonHolder.INSTANCE.host; - } - - } - - SpanReceiverHost(Configuration conf) { - receivers = new HashSet(); - this.conf = conf; - } - - /** - * Reads the names of classes specified in the {@code hbase.trace.spanreceiver.classes} property - * and instantiates and registers them with the Tracer. - * - */ - public void loadSpanReceivers() { - String[] receiverNames = conf.getStrings(SPAN_RECEIVERS_CONF_KEY); - if (receiverNames == null || receiverNames.length == 0) { - return; - } - - SpanReceiverBuilder builder = new SpanReceiverBuilder(new HBaseHTraceConfiguration(conf)); - for (String className : receiverNames) { - className = className.trim(); - - SpanReceiver receiver = builder.spanReceiverClass(className).build(); - if (receiver != null) { - receivers.add(receiver); - LOG.info("SpanReceiver " + className + " was loaded successfully."); - } - } - for (SpanReceiver rcvr : receivers) { - Trace.addReceiver(rcvr); - } - } - - /** - * Calls close() on all SpanReceivers created by this SpanReceiverHost. - */ - public synchronized void closeReceivers() { - if (closed) return; - closed = true; - for (SpanReceiver rcvr : receivers) { - try { - rcvr.close(); - } catch (IOException e) { - LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e); - } - } - } -} diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 8702ac1..befc243 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1683,4 +1683,12 @@ possible configurations would overwhelm and obscure the important. The max number of threads used in MobCompactor. + + hbase.htrace.span.receiver.classes + + + The class name of the HTrace SpanReceivers to use inside HBase. If + there are no class names supplied here, tracings will not be emitted. 
+ + diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTimeout.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTimeout.java index d6ee673..fc4a2be 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTimeout.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTimeout.java @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Rule; import org.junit.Ignore; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestRule; -import org.junit.rules.Timeout; @Category({SmallTests.class}) public class TestTimeout { diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index f820193..badbe69 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; import org.apache.hadoop.hbase.util.Addressing; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.Tracer; +import org.apache.htrace.core.TraceScope; import java.io.IOException; @@ -134,7 +134,9 @@ public class MemcachedBlockCache implements BlockCache { // Assume that nothing is the block cache HFileBlock result = null; - try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) { + Tracer tracer = Tracer.curThreadTracer(); + try (TraceScope traceScope = tracer == null? null: + tracer.newScope("MemcachedBlockCache.getBlock")) { result = client.get(cacheKey.toString(), tc); } catch (Exception e) { // Catch a pretty broad set of exceptions to limit any changes in the memecache client diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index 4423650..b3daf18 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -63,10 +63,10 @@ import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LoadTestTool; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; -import org.apache.htrace.impl.AlwaysSampler; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.SpanId; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -158,6 +158,9 @@ public class IntegrationTestMTTR { @BeforeClass public static void setUp() throws Exception { + // TODO: Tracing had the ALWAYS Sampler turned on. The refactor to come up on htrace 4.0 did + // not add in this. Fix. 
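One way to address the TODO above: htrace 4 configures samplers on the Tracer instead of passing one per span, so the old Trace.startSpan(name, AlwaysSampler.INSTANCE) behavior could be restored at Tracer build time. A sketch, assuming htrace 4's Tracer.SAMPLER_CLASSES_KEY constant and an illustrative tracer name:

  // Samplers now live on the Tracer; KEY_PREFIX is "hbase.htrace.".
  conf.set(HBaseHTraceConfiguration.KEY_PREFIX + Tracer.SAMPLER_CLASSES_KEY,
      "org.apache.htrace.core.AlwaysSampler");
  Tracer tracer = new Tracer.Builder().name("IntegrationTestMTTR").
      conf(new HBaseHTraceConfiguration(conf)).build();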
+ // Set up the integration test util if (util == null) { util = new IntegrationTestingUtility(); } @@ -357,7 +360,7 @@ public class IntegrationTestMTTR { */ private static class TimingResult { DescriptiveStatistics stats = new DescriptiveStatistics(); - ArrayList<Long> traces = new ArrayList<Long>(10); + ArrayList<SpanId> traces = new ArrayList<SpanId>(10); /** * Add a result to this aggregate result. @@ -367,7 +370,7 @@ public class IntegrationTestMTTR { public void addResult(long time, Span span) { stats.addValue(TimeUnit.MILLISECONDS.convert(time, TimeUnit.NANOSECONDS)); if (TimeUnit.SECONDS.convert(time, TimeUnit.NANOSECONDS) >= 1) { - traces.add(span.getTraceId()); + traces.add(span.getSpanId()); } } @@ -412,7 +415,7 @@ public class IntegrationTestMTTR { long start = System.nanoTime(); TraceScope scope = null; try { - scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE); + Tracer tracer = Tracer.curThreadTracer(); + scope = tracer == null? null: tracer.newScope(getSpanName()); boolean actionResult = doAction(); if (actionResult && future.isDone()) { numAfterDone++; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java index 3fa8a9c..da3c4d5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java @@ -35,9 +35,8 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.ToolRunner; -import org.apache.htrace.Sampler; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -61,9 +60,10 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { private IntegrationTestingUtility util; private Random random = new Random(); private Admin admin; - private SpanReceiverHost receiverHost; public static void main(String[] args) throws Exception { + // TODO: Tracing had the ALWAYS Sampler turned on. The refactor to come up on htrace 4.0 did + // not add in this. Fix.
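With SpanReceiverHost removed, this test would own a Tracer directly, mirroring what the patch does in HRegionServer further down: span receivers come from the hbase.htrace.span.receiver.classes property added to hbase-default.xml above, and closing the Tracer replaces receiverHost.closeReceivers(). A sketch under those assumptions (the field and tracer name are illustrative):

  // Setup: Tracer.Builder attaches any receivers named in
  // hbase.htrace.span.receiver.classes; there is no SpanReceiverHost in htrace 4.
  this.tracer = new Tracer.Builder().name("IntegrationTestSendTraceRequests").
      conf(new HBaseHTraceConfiguration(util.getConfiguration())).build();
  // Teardown: closing the Tracer closes its span receivers.
  this.tracer.close();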
Configuration configuration = HBaseConfiguration.create(); IntegrationTestingUtility.setUseDistributedCluster(configuration); IntegrationTestSendTraceRequests tool = new IntegrationTestSendTraceRequests(); @@ -108,13 +108,11 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { service.shutdown(); service.awaitTermination(100, TimeUnit.SECONDS); Thread.sleep(90000); - receiverHost.closeReceivers(); util.restoreCluster(); util = null; } private void doScans(ExecutorService service, final LinkedBlockingQueue rks) { - for (int i = 0; i < 100; i++) { Runnable runnable = new Runnable() { private TraceScope innerScope = null; @@ -123,7 +121,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { public void run() { ResultScanner rs = null; try { - innerScope = Trace.startSpan("Scan", Sampler.ALWAYS); + innerScope = Tracer.curThreadTracer().newScope("Scan"); Table ht = util.getConnection().getTable(tableName); Scan s = new Scan(); s.setStartRow(Bytes.toBytes(rowKeyQueue.take())); @@ -137,17 +135,13 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { accum |= Bytes.toLong(r.getRow()); } - innerScope.getSpan().addTimelineAnnotation("Accum result = " + accum); + innerScope.addTimelineAnnotation("Accum result = " + accum); ht.close(); ht = null; } catch (IOException e) { e.printStackTrace(); - - innerScope.getSpan().addKVAnnotation( - Bytes.toBytes("exception"), - Bytes.toBytes(e.getClass().getSimpleName())); - + innerScope.getSpan().addKVAnnotation("exception", e.getClass().getSimpleName()); } catch (Exception e) { } finally { if (innerScope != null) innerScope.close(); @@ -182,7 +176,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { long accum = 0; for (int x = 0; x < 5; x++) { try { - innerScope = Trace.startSpan("gets", Sampler.ALWAYS); + innerScope = Tracer.curThreadTracer().newScope("gets"); long rk = rowKeyQueue.take(); Result r1 = ht.get(new Get(Bytes.toBytes(rk))); if (r1 != null) { @@ -212,7 +206,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { private void createTable() throws IOException { TraceScope createScope = null; try { - createScope = Trace.startSpan("createTable", Sampler.ALWAYS); + createScope = Tracer.curThreadTracer().newScope("createTable"); util.createTable(tableName, familyName); } finally { if (createScope != null) createScope.close(); @@ -224,7 +218,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { try { if (admin.tableExists(tableName)) { - deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS); + deleteScope = Tracer.curThreadTracer().newScope("deleteTable"); util.deleteTable(tableName); } } finally { @@ -237,7 +231,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName); byte[] value = new byte[300]; for (int x = 0; x < 5000; x++) { - TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS); + TraceScope traceScope = Tracer.curThreadTracer().newScope("insertData"); try { for (int i = 0; i < 5; i++) { long rk = random.nextLong(); @@ -280,6 +274,8 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { Configuration conf = new Configuration(util.getConfiguration()); conf.setBoolean("hbase.zipkin.is-in-client-mode", true); - this.receiverHost = SpanReceiverHost.getInstance(conf); + /** this.receiverHost = SpanReceiverHost.getInstance(conf); + * TODO: FIX!! 
What does this mean in htrace 4.0? + */ } } diff --git a/hbase-protocol/src/main/protobuf/Tracing.proto b/hbase-protocol/src/main/protobuf/Tracing.proto index 5a64cfc..2e9246f 100644 --- a/hbase-protocol/src/main/protobuf/Tracing.proto +++ b/hbase-protocol/src/main/protobuf/Tracing.proto @@ -22,12 +22,15 @@ option java_outer_classname = "TracingProtos"; option java_generate_equals_and_hash = true; option optimize_for = SPEED; -//Used to pass through the information necessary to continue -//a trace after an RPC is made. All we need is the traceid -//(so we know the overarching trace this message is a part of), and -//the id of the current span when this message was sent, so we know -//what span caused the new span we will create when this message is received. +// Used to pass through the information necessary to continue +// a trace after an RPC is made. All we need is the traceid +// (so we know the overarching trace this message is a part of). +// Pre-4.0.0 htrace, we had parent and trace id. In 4.0.0, there is one id only and it is +// 128 bits. Rather than add new fields or change field names, just set the high and low of +// the 128-bit integer into the old parent id and trace id fields. message RPCTInfo { + // In 4.0.0 htrace, trace_id holds the low part of the 128-bit span id optional int64 trace_id = 1; + // In 4.0.0 htrace, parent_id holds the high part of the 128-bit span id optional int64 parent_id = 2; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index 1a8b847..6b7bb6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -23,11 +23,11 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Server; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.htrace.core.SpanId; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; /** * Abstract base class for all HBase event handlers. Subclasses should @@ -68,13 +68,13 @@ public abstract class EventHandler implements Runnable, Comparable { // Time to wait for events to happen, should be kept short protected int waitingTimeForEvents; - private final Span parent; + private final SpanId parentSpanId; /** * Default base class constructor. */ public EventHandler(Server server, EventType eventType) { - this.parent = Trace.currentSpan(); + this.parentSpanId = Tracer.getCurrentSpanId(); this.server = server; this.eventType = eventType; seqid = seqids.incrementAndGet(); @@ -98,7 +98,9 @@ public abstract class EventHandler implements Runnable, Comparable { } public void run() { - TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope chunk = tracer == null?
null: + tracer.newScope(this.getClass().getSimpleName(), this.parentSpanId); try { process(); } catch(Throwable t) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 6970d27..49e633f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -59,8 +59,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IdLock; import org.apache.hadoop.hbase.util.ObjectIntPair; import org.apache.hadoop.io.WritableUtils; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import com.google.common.annotations.VisibleForTesting; @@ -1439,7 +1439,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { boolean useLock = false; IdLock.Entry lockEntry = null; - TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock"); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: + tracer.newScope("HFileReaderImpl.readBlock"); try { while (true) { // Check cache for block. If found return. @@ -1452,9 +1454,7 @@ HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, useLock, isCompaction, updateCacheMetrics, expectedBlockType, expectedDataBlockEncoding); if (cachedBlock != null) { - if (Trace.isTracing()) { - traceScope.getSpan().addTimelineAnnotation("blockCacheHit"); - } + // traceScope is null when no Tracer is set on this thread; guard the annotation. + if (traceScope != null) traceScope.addTimelineAnnotation("blockCacheHit"); assert cachedBlock.isUnpacked() : "Packed block leak."; if (cachedBlock.getBlockType().isData()) { if (updateCacheMetrics) { @@ -1480,9 +1480,8 @@ // Carry on, please load. } - if (Trace.isTracing()) { - traceScope.getSpan().addTimelineAnnotation("blockCacheMiss"); - } + if (traceScope != null) traceScope.addTimelineAnnotation("blockCacheMiss"); + // Load block from filesystem.
HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1, pread); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 26ffa95..5c57691 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -28,8 +28,8 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import com.google.protobuf.Message; @@ -40,8 +40,6 @@ import com.google.protobuf.Message; */ @InterfaceAudience.Private public class CallRunner { - private static final Log LOG = LogFactory.getLog(CallRunner.class); - private Call call; private RpcServerInterface rpcServer; private MonitoredRPCHandler status; @@ -99,8 +97,10 @@ public class CallRunner { throw new ServerNotRunningYetException("Server " + rpcServer.getListenerAddress() + " is not running yet"); } - if (call.tinfo != null) { - traceScope = Trace.startSpan(call.toTraceString(), call.tinfo); + if (call.spanId != null) { + Tracer tracer = Tracer.curThreadTracer(); + traceScope = tracer == null? null: + tracer.newScope(call.toTraceString(), call.spanId); } // make the call resultPair = this.rpcServer.call(call.service, call.md, call.param, call.cellScanner, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 8cead2a..5bd0ed7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -122,8 +122,8 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.StringUtils; +import org.apache.htrace.core.SpanId; import org.codehaus.jackson.map.ObjectMapper; -import org.apache.htrace.TraceInfo; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.BlockingService; @@ -301,7 +301,7 @@ public class RpcServer implements RpcServerInterface { // set at call completion protected long size; // size of current call protected boolean isError; - protected TraceInfo tinfo; + protected SpanId spanId; private ByteBuffer cellBlock = null; private User user; @@ -310,7 +310,7 @@ public class RpcServer implements RpcServerInterface { Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, Connection connection, Responder responder, - long size, TraceInfo tinfo, final InetAddress remoteAddress) { + long size, SpanId spanId, final InetAddress remoteAddress) { this.id = id; this.service = service; this.md = md; @@ -324,7 +324,7 @@ public class RpcServer implements RpcServerInterface { this.responder = responder; this.isError = false; this.size = size; - this.tinfo = tinfo; + this.spanId = spanId; this.user = connection.user == null? 
null: userProvider.create(connection.user); this.remoteAddress = remoteAddress; } @@ -1864,8 +1864,9 @@ public class RpcServer implements RpcServerInterface { return; } - TraceInfo traceInfo = header.hasTraceInfo() - ? new TraceInfo(header.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId()) + // ParentId is the high 64-bits and traceId is the low 64-bits in a SpanId. + SpanId traceInfo = header.hasTraceInfo() + ? new SpanId(header.getTraceInfo().getParentId(), header.getTraceInfo().getTraceId()) : null; Call call = new Call(id, this.service, md, header, param, cellScanner, this, responder, totalRequestSize, traceInfo, RpcServer.getRemoteIp()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index cb9759d..5fad03f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -48,7 +48,6 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.StringUtils; -import com.google.protobuf.InvalidProtocolBufferException; import java.io.File; import java.io.IOException; @@ -770,7 +769,7 @@ public class TableMapReduceUtil { io.netty.channel.Channel.class, com.google.protobuf.Message.class, com.google.common.collect.Lists.class, - org.apache.htrace.Trace.class, + org.apache.htrace.core.Tracer.class, com.yammer.metrics.core.MetricsRegistry.class); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index d34f25e..e105689 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.htrace.Trace; +import org.apache.htrace.core.Tracer; /** * Handler to run disable of a table. @@ -205,11 +205,15 @@ public class DisableTableHandler extends EventHandler { continue; } final HRegionInfo hri = region; - pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler",new Runnable() { + Tracer tracer = Tracer.curThreadTracer(); + Runnable runnable = new Runnable() { public void run() { assignmentManager.unassign(hri); } - })); + }; + pool.execute(tracer != null?
+ tracer.wrap(runnable, "DisableTableHandler.BulkDisabler"): + runnable); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index 351751e3..d5613da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.htrace.Trace; +import org.apache.htrace.core.Tracer; @InterfaceAudience.Private public class DisableTableProcedure @@ -509,12 +509,15 @@ public class DisableTableProcedure && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) { continue; } - pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() { + Runnable runnable = new Runnable() { @Override public void run() { assignmentManager.unassign(region); } - })); + }; + Tracer tracer = Tracer.curThreadTracer(); + pool.execute(tracer == null? runnable: + tracer.wrap(runnable, "DisableTableHandler.BulkDisabler")); } } @@ -540,4 +543,4 @@ public class DisableTableProcedure return regions != null && regions.isEmpty(); } } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index cc8c3a8..040f273 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -47,7 +47,8 @@ import org.apache.hadoop.hbase.util.CollectionBackedScanner; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; -import org.apache.htrace.Trace; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.Tracer; /** * The MemStore holds in-memory modifications to the Store. 
Modifications @@ -634,9 +635,8 @@ public class DefaultMemStore implements MemStore { this.snapshotAllocatorAtCreation = snapshotAllocator; this.snapshotAllocatorAtCreation.incScannerCount(); } - if (Trace.isTracing() && Trace.currentSpan() != null) { - Trace.currentSpan().addTimelineAnnotation("Creating MemStoreScanner"); - } + Span span = Tracer.getCurrentSpan(); + if (span != null) span.addTimelineAnnotation("Creating MemStoreScanner"); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 2c145b4..1514070 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -181,8 +181,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.util.StringUtils; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; @@ -5069,11 +5069,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (!waitForLock) { return null; } - TraceScope traceScope = null; + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = tracer == null? null: + tracer.newScope("HRegion.getRowLockInternal"); try { - if (Trace.isTracing()) { - traceScope = Trace.startSpan("HRegion.getRowLockInternal"); - } // Row is already locked by some other thread, give up or wait for it if (!existingContext.latch.await(this.rowLockWaitDuration, TimeUnit.MILLISECONDS)) { if(traceScope != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 570a5ba..e043f58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -141,7 +141,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.trace.SpanReceiverHost; +import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; @@ -172,6 +172,8 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.metrics.util.MBeanUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.htrace.core.Tracer; +import org.apache.htrace.core.TracerId; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NoNodeException; import org.apache.zookeeper.data.Stat; @@ -337,7 +339,7 @@ public class HRegionServer extends HasThread implements public static final String REGIONSERVER = "regionserver"; MetricsRegionServer metricsRegionServer; - private SpanReceiverHost spanReceiverHost; + private Tracer tracer; /** * ChoreService used to schedule tasks that we want to run periodically @@ -576,7 
+578,12 @@ public class HRegionServer extends HasThread implements this.tableDescriptors = getFsTableDescriptors(); service = new ExecutorService(getServerName().toShortString()); - spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration()); + + // Set up tracer. Add ServerName to the TracerId so it comes out as "RegionServer,host,port,ts" + this.conf.set(HBaseHTraceConfiguration.KEY_PREFIX + TracerId.TRACER_ID_KEY, + "%{tname}," + this.serverName.toString()); + this.tracer = new Tracer.Builder().name("RegionServer"). + conf(new HBaseHTraceConfiguration(this.conf)).build(); // Some unit tests don't need a cluster, so no zookeeper at all if (!conf.getBoolean("hbase.testing.nocluster", false)) { @@ -2148,8 +2155,8 @@ public class HRegionServer extends HasThread implements this.cacheFlusher.join(); } - if (this.spanReceiverHost != null) { - this.spanReceiverHost.closeReceivers(); + if (this.tracer != null) { + this.tracer.close(); } if (this.walRoller != null) { Threads.shutdown(this.walRoller.getThread()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 40c5046..33f3887 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -57,8 +57,8 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import com.google.common.base.Preconditions; @@ -579,11 +579,9 @@ class MemStoreFlusher implements FlushRequester { * amount of memstore consumption. */ public void reclaimMemStoreMemory() { - TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory"); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope scope = tracer == null? null: tracer.newScope("MemStoreFlusher.reclaimMemStoreMemory"); if (isAboveHighWaterMark()) { - if (Trace.isTracing()) { - scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark."); - } + if (scope != null) scope.addTimelineAnnotation("Force Flush. We're above high water mark."); long start = EnvironmentEdgeManager.currentTime(); synchronized (this.blockSignal) { boolean blocked = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 1c29827..0134811 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -81,10 +81,8 @@ import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.util.StringUtils; -import org.apache.htrace.NullScope; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; import com.google.common.annotations.VisibleForTesting; import com.lmax.disruptor.BlockingWaitStrategy; @@ -647,11 +645,12 @@ public class FSHLog implements WAL { * @param nextWriter - * @param startTimeNanos + * @param scope */ - private void preemptiveSync(final ProtobufLogWriter nextWriter) { + private void preemptiveSync(final ProtobufLogWriter nextWriter, final TraceScope scope) { long startTimeNanos = System.nanoTime(); try { nextWriter.sync(); - postSync(System.nanoTime() - startTimeNanos, 0); + if (scope != null) scope.addTimelineAnnotation("preemptive"); + postSync(System.nanoTime() - startTimeNanos, 0, scope); } catch (IOException e) { // optimization failed, no need to abort here. LOG.warn("pre-sync failed but an optimization so keep going", e); @@ -673,7 +672,8 @@ public class FSHLog implements WAL { LOG.debug("WAL closing. Skipping rolling of writer"); return regionsToFlush; } - TraceScope scope = Trace.startSpan("FSHLog.rollWriter"); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope scope = tracer == null? null: tracer.newScope("FSHLog.rollWriter"); try { Path oldPath = getOldPath(); Path newPath = getNewPath(); @@ -684,7 +684,7 @@ public class FSHLog implements WAL { nextHdfsOut = ((ProtobufLogWriter)nextWriter).getStream(); // If a ProtobufLogWriter, go ahead and try and sync to force setup of pipeline. // If this fails, we just keep going.... it is an optimization, not the end of the world. - preemptiveSync((ProtobufLogWriter)nextWriter); + preemptiveSync((ProtobufLogWriter)nextWriter, scope); } tellListenersAboutPreLogRoll(oldPath, newPath); // NewPath could be equal to oldPath if replaceWriter fails. @@ -697,7 +697,6 @@ public class FSHLog implements WAL { } } finally { closeBarrier.endOp(); - assert scope == NullScope.INSTANCE || !scope.isDetached(); - scope.close(); + if (scope != null) scope.close(); } return regionsToFlush; @@ -812,7 +811,8 @@ public class FSHLog implements WAL { SafePointZigZagLatch zigzagLatch = (this.ringBufferEventHandler == null)? null: this.ringBufferEventHandler.attainSafePoint(); afterCreatingZigZagLatch(); - TraceScope scope = Trace.startSpan("FSHFile.replaceWriter"); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope scope = tracer == null? null: tracer.newScope("FSHLog.replaceWriter"); try { // Wait on the safe point to be achieved. Send in a sync in case nothing has hit the // ring buffer between the above notification of writer that we want it to go to @@ -821,7 +821,7 @@ // to come back. Cleanup this syncFuture down below after we are ready to run again.
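The FSHLog hunks below hand a TraceScope between threads (appender, ring-buffer consumer, syncer). In htrace 4 that handoff is spelled detach()/reattach(): the producing thread detaches the scope so its span outlives the thread, and the consuming thread reattaches it before closing. A sketch of the protocol only (the handoff variable is illustrative; a null scope still needs guarding, as elsewhere in the patch):

  // Producing thread: unbind the scope so this thread no longer owns the span.
  scope.detach();
  handoff.put(scope); // e.g. carried as the RingBufferTruck payload
  // Consuming thread: re-bind the span to this thread, then close it normally.
  TraceScope taken = handoff.take();
  taken.reattach();
  taken.close();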
try { if (zigzagLatch != null) { - Trace.addTimelineAnnotation("awaiting safepoint"); + scope.addTimelineAnnotation("awaiting safepoint"); syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer()); } } catch (FailedSyncBeforeLogCloseException e) { @@ -835,9 +835,9 @@ public class FSHLog implements WAL { // TODO: This is close is inline with critical section. Should happen in background? try { if (this.writer != null) { - Trace.addTimelineAnnotation("closing writer"); + scope.addTimelineAnnotation("closing writer"); this.writer.close(); - Trace.addTimelineAnnotation("writer closed"); + scope.addTimelineAnnotation("writer closed"); } this.closeErrorCount.set(0); } catch (IOException ioe) { @@ -1083,7 +1083,8 @@ public class FSHLog implements WAL { if (this.closed) throw new IOException("Cannot append; log is closed"); // Make a trace scope for the append. It is closed on other side of the ring buffer by the // single consuming thread. Don't have to worry about it. - TraceScope scope = Trace.startSpan("FSHLog.append"); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope scope = tracer == null? null: tracer.newScope("FSHLog.append"); // This is crazy how much it takes to make an edit. Do we need all this stuff!!!!???? We need // all this to make a key and then below to append the edit, we need to carry htd, info, @@ -1096,7 +1097,8 @@ public class FSHLog implements WAL { // edit with its edit/sequence id. The below entry.getRegionSequenceId will wait on the // latch to be thrown. TODO: reuse FSWALEntry as we do SyncFuture rather create per append. entry = new FSWALEntry(sequence, key, edits, sequenceId, inMemstore, htd, hri, memstoreCells); - truck.loadPayload(entry, scope.detach()); + scope.detach(); + truck.loadPayload(entry, scope); } finally { this.disruptor.getRingBuffer().publish(sequence); } @@ -1237,13 +1239,14 @@ public class FSHLog implements WAL { } // I got something. Lets run. Save off current sequence number in case it changes // while we run. - TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan()); + TraceScope scope = takeSyncFuture.getTraceScope(); + scope.reattach(); long start = System.nanoTime(); Throwable lastException = null; try { - Trace.addTimelineAnnotation("syncing writer"); + scope.addTimelineAnnotation("syncing writer"); writer.sync(); - Trace.addTimelineAnnotation("writer synced"); + scope.addTimelineAnnotation("writer synced"); currentSequence = updateHighestSyncedSequence(currentSequence); } catch (IOException e) { LOG.error("Error syncing, request close of WAL", e); @@ -1253,7 +1256,8 @@ public class FSHLog implements WAL { lastException = e; } finally { // reattach the span to the future before releasing. - takeSyncFuture.setSpan(scope.detach()); + scope.detach(); + takeSyncFuture.setTraceScope(scope); // First release what we 'took' from the queue. syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException); // Can we release other syncs? @@ -1261,7 +1265,7 @@ public class FSHLog implements WAL { if (lastException != null) requestLogRoll(); else checkLogRoll(); } - postSync(System.nanoTime() - start, syncCount); + postSync(System.nanoTime() - start, syncCount, scope); } catch (InterruptedException e) { // Presume legit interrupt. 
Thread.currentThread().interrupt(); @@ -1347,9 +1351,9 @@ public class FSHLog implements WAL { return publishSyncOnRingBuffer(null); } - private SyncFuture publishSyncOnRingBuffer(Span span) { + private SyncFuture publishSyncOnRingBuffer(TraceScope scope) { long sequence = this.disruptor.getRingBuffer().next(); - SyncFuture syncFuture = getSyncFuture(sequence, span); + SyncFuture syncFuture = getSyncFuture(sequence, scope); try { RingBufferTruck truck = this.disruptor.getRingBuffer().get(sequence); truck.loadPayload(syncFuture); @@ -1360,15 +1364,15 @@ public class FSHLog implements WAL { } // Sync all known transactions - private Span publishSyncThenBlockOnCompletion(Span span) throws IOException { - return blockOnSync(publishSyncOnRingBuffer(span)); + private TraceScope publishSyncThenBlockOnCompletion(TraceScope scope) throws IOException { + return blockOnSync(publishSyncOnRingBuffer(scope)); } - private Span blockOnSync(final SyncFuture syncFuture) throws IOException { + private TraceScope blockOnSync(final SyncFuture syncFuture) throws IOException { // Now we have published the ringbuffer, halt the current thread until we get an answer back. try { syncFuture.get(); - return syncFuture.getSpan(); + return syncFuture.getTraceScope(); } catch (InterruptedException ie) { LOG.warn("Interrupted", ie); throw convertInterruptedExceptionToIOException(ie); @@ -1384,22 +1388,22 @@ public class FSHLog implements WAL { return ioe; } - private SyncFuture getSyncFuture(final long sequence, Span span) { + private SyncFuture getSyncFuture(final long sequence, TraceScope scope) { SyncFuture syncFuture = this.syncFuturesByHandler.get(Thread.currentThread()); if (syncFuture == null) { syncFuture = new SyncFuture(); this.syncFuturesByHandler.put(Thread.currentThread(), syncFuture); } - return syncFuture.reset(sequence, span); + return syncFuture.reset(sequence, scope); } - private void postSync(final long timeInNanos, final int handlerSyncs) { + private void postSync(final long timeInNanos, final int handlerSyncs, final TraceScope scope) { if (timeInNanos > this.slowSyncNs) { String msg = new StringBuilder().append("Slow sync cost: ") .append(timeInNanos / 1000000).append(" ms, current pipeline: ") .append(Arrays.toString(getPipeLine())).toString(); - Trace.addTimelineAnnotation(msg); + scope.addTimelineAnnotation(msg); LOG.info(msg); } if (!listeners.isEmpty()) { @@ -1478,11 +1482,13 @@ public class FSHLog implements WAL { @Override public void sync() throws IOException { - TraceScope scope = Trace.startSpan("FSHLog.sync"); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope scope = tracer == null? null: tracer.newScope("FSHLog.sync"); try { - scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach())); + scope.detach(); + scope = publishSyncThenBlockOnCompletion(scope); + scope.reattach(); } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); scope.close(); } } @@ -1493,11 +1499,14 @@ public class FSHLog implements WAL { // Already sync'd. return; } - TraceScope scope = Trace.startSpan("FSHLog.sync"); + Tracer tracer = Tracer.curThreadTracer(); + TraceScope scope = tracer == null? 
null: + tracer.newScope("FSHLog.sync txid=" + txid); try { - scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach())); + scope.detach(); + scope = publishSyncThenBlockOnCompletion(scope); + scope.reattach(); } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); scope.close(); } } @@ -1772,7 +1781,7 @@ public class FSHLog implements WAL { // Force flush of syncs if we are carrying a full complement of syncFutures. if (this.syncFuturesCount == this.syncFutures.length) endOfBatch = true; } else if (truck.hasFSWALEntryPayload()) { - TraceScope scope = Trace.continueSpan(truck.unloadSpanPayload()); + TraceScope scope = truck.unloadTraceSpanPayload(); try { FSWALEntry entry = truck.unloadFSWALEntryPayload(); if (this.exception != null) { @@ -1792,7 +1801,6 @@ public class FSHLog implements WAL { // Return to keep processing events coming off the ringbuffer return; } finally { - assert scope == NullScope.INSTANCE || !scope.isDetached(); scope.close(); // append scope is complete } } else { @@ -2053,4 +2061,4 @@ public class FSHLog implements WAL { } return new DatanodeInfo[0]; } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java index 25c2111..d093de0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java @@ -19,7 +19,8 @@ package org.apache.hadoop.hbase.regionserver.wal; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.htrace.Span; +import org.apache.htrace.core.Span; +import org.apache.htrace.core.TraceScope; import com.lmax.disruptor.EventFactory; @@ -42,14 +43,14 @@ class RingBufferTruck { * The tracing span for this entry. Can be null. * TODO: Fix up tracing. */ - private Span span; + private TraceScope scope; /** * Load the truck with a {@link FSWALEntry} and associated {@link Span}. */ - void loadPayload(final FSWALEntry entry, final Span span) { + void loadPayload(final FSWALEntry entry, final TraceScope scope) { this.entry = entry; - this.span = span; + this.scope = scope; this.syncFuture = null; } @@ -59,7 +60,7 @@ class RingBufferTruck { void loadPayload(final SyncFuture syncFuture) { this.syncFuture = syncFuture; this.entry = null; - this.span = null; + this.scope = null; } /** @@ -99,10 +100,10 @@ class RingBufferTruck { /** * Unload the truck of its {@link Span} payload. The internal reference is released. 
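All of the FSHLog changes above follow one handoff discipline: the producer thread detaches its `TraceScope`, `RingBufferTruck` and `SyncFuture` ferry the detached scope across the ring buffer, and the consumer reattaches it before annotating and closing. A stripped-down sketch of that discipline, assuming an ordinary queue in place of the disruptor ring buffer (class and variable names are illustrative; `Tracer`/`TraceScope` are the htrace-4 types this patch uses):

[source,java]
----
import java.util.concurrent.SynchronousQueue;

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class ScopeHandoffSketch {
  // Assumes a non-null Tracer; FSHLog additionally null-guards every step.
  public static void handoff(final Tracer tracer) throws InterruptedException {
    final SynchronousQueue<TraceScope> queue = new SynchronousQueue<TraceScope>();
    Thread consumer = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          TraceScope scope = queue.take(); // receive the detached scope
          scope.reattach();                // adopt it on the consumer thread
          scope.addTimelineAnnotation("synced");
          scope.close();                   // the consumer owns the close
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    });
    consumer.start();
    TraceScope scope = tracer.newScope("handoff"); // opened on the producer thread
    scope.detach();                                // unbind before crossing threads
    queue.put(scope);                              // RingBufferTruck plays this role in FSHLog
    consumer.join();
  }
}
----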
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
index 25c2111..d093de0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
@@ -19,7 +19,8 @@ package org.apache.hadoop.hbase.regionserver.wal;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 
 import com.lmax.disruptor.EventFactory;
 
@@ -42,14 +43,14 @@ class RingBufferTruck {
    * The tracing span for this entry.  Can be null.
    * TODO: Fix up tracing.
    */
-  private Span span;
+  private TraceScope scope;
 
   /**
-   * Load the truck with a {@link FSWALEntry} and associated {@link Span}.
+   * Load the truck with a {@link FSWALEntry} and associated {@link TraceScope}.
    */
-  void loadPayload(final FSWALEntry entry, final Span span) {
+  void loadPayload(final FSWALEntry entry, final TraceScope scope) {
     this.entry = entry;
-    this.span = span;
+    this.scope = scope;
     this.syncFuture = null;
   }
 
@@ -59,7 +60,7 @@ class RingBufferTruck {
   void loadPayload(final SyncFuture syncFuture) {
     this.syncFuture = syncFuture;
     this.entry = null;
-    this.span = null;
+    this.scope = null;
   }
 
  /**
@@ -99,10 +100,10 @@ class RingBufferTruck {
 
   /**
-   * Unload the truck of its {@link Span} payload.  The internal reference is released.
+   * Unload the truck of its {@link TraceScope} payload.  The internal reference is released.
    */
-  Span unloadSpanPayload() {
-    Span ret = this.span;
-    this.span = null;
-    return ret;
+  TraceScope unloadTraceSpanPayload() {
+    TraceScope scope = this.scope;
+    this.scope = null;
+    return scope;
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
index 7de8367..3369f05 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -21,7 +21,8 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 
 /**
  * A Future on a filesystem sync call.  It given to a client or 'Handler' for it to wait on till
@@ -76,7 +77,7 @@ class SyncFuture {
   /**
    * Optionally carry a disconnected scope to the SyncRunner.
    */
-  private Span span;
+  private TraceScope scope;
 
   /**
    * Call this method to clear old usage and get it ready for new deploy. Call
@@ -94,17 +95,17 @@ class SyncFuture {
    * this method even if it is being used for the first time.
    *
    * @param sequence sequenceId from this Future's position in the RingBuffer
-   * @param span curren span, detached from caller. Don't forget to attach it when
+   * @param scope current TraceScope, detached from caller. Don't forget to reattach it when
    *          resuming after a call to {@link #get()}.
    * @return this
    */
-  synchronized SyncFuture reset(final long sequence, Span span) {
+  synchronized SyncFuture reset(final long sequence, TraceScope scope) {
     if (t != null && t != Thread.currentThread()) throw new IllegalStateException();
     t = Thread.currentThread();
     if (!isDone()) throw new IllegalStateException("" + sequence + " " + Thread.currentThread());
     this.doneSequence = NOT_DONE;
     this.ringBufferSequence = sequence;
-    this.span = span;
+    this.scope = scope;
     return this;
   }
 
@@ -118,21 +119,21 @@ class SyncFuture {
   }
 
   /**
-   * Retrieve the {@code span} instance from this Future. EventHandler calls
-   * this method to continue the span. Thread waiting on this Future musn't call
+   * Retrieve the {@code TraceScope} instance from this Future. EventHandler calls
+   * this method to continue the TraceScope. Thread waiting on this Future mustn't call
    * this method until AFTER calling {@link #get()} and the future has been
    * released back to the originating thread.
    */
-  synchronized Span getSpan() {
-    return this.span;
+  synchronized TraceScope getTraceScope() {
+    return this.scope;
   }
 
   /**
-   * Used to re-attach a {@code span} to the Future. Called by the EventHandler
-   * after a it has completed processing and detached the span from its scope.
+   * Used to re-attach a {@code TraceScope} to the Future. Called by the EventHandler
+   * after it has completed processing and detached the scope from its thread.
   */
-  synchronized void setSpan(Span span) {
-    this.span = span;
+  synchronized void setTraceScope(TraceScope scope) {
+    this.scope = scope;
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 569ef71..b3cd461 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -74,8 +74,11 @@ import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.*;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Hash;
+import org.apache.hadoop.hbase.util.MurmurHash;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.YammerHistogramUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -85,11 +88,9 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.codehaus.jackson.map.ObjectMapper;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
 
 import com.google.common.base.Objects;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -968,8 +969,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     protected final TestOptions opts;
 
     private final Status status;
-    private final Sampler traceSampler;
-    private final SpanReceiverHost receiverHost;
+    private final Tracer tracer;
     protected Connection connection; // protected Table table;
@@ -985,10 +985,13 @@ public class PerformanceEvaluation extends Configured implements Tool {
     Test(final Connection con, final TestOptions options, final Status status) {
       this.connection = con;
       this.conf = con == null ? HBaseConfiguration.create() : this.connection.getConfiguration();
-      this.receiverHost = this.conf == null? null: SpanReceiverHost.getInstance(conf);
+      this.tracer = new Tracer.Builder().name("Client").
+        conf(new HBaseHTraceConfiguration(this.conf)).
+        build();
       this.opts = options;
       this.status = status;
       this.testName = this.getClass().getSimpleName();
+      /* TODO: Fix.
       if (options.traceRate >= 1.0) {
         this.traceSampler = Sampler.ALWAYS;
       } else if (options.traceRate > 0.0) {
@@ -996,7 +999,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         this.traceSampler = new ProbabilitySampler(new HBaseHTraceConfiguration(conf));
       } else {
         this.traceSampler = Sampler.NEVER;
-      }
+      }*/
       everyN = (int) (opts.totalRows / (opts.totalRows * opts.sampleRate));
       if (options.isValueZipf()) {
         this.zipf = new RandomDistribution.Zipf(this.rand, 1, options.getValueSize(), 1.2);
@@ -1067,7 +1070,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       if (!opts.oneCon) {
         connection.close();
       }
-      receiverHost.closeReceivers();
+      tracer.close();
     }
 
     abstract void onTakedown() throws IOException;
@@ -1100,7 +1103,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       for (int i = opts.startRow; i < lastRow; i++) {
         if (i % everyN != 0) continue;
         long startTime = System.nanoTime();
-        TraceScope scope = Trace.startSpan("test row", traceSampler);
+        TraceScope scope = this.tracer.newScope("test row");
         try {
           testRow(i);
         } finally {
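The block commented out above used to pick a `Sampler` per span. In htrace 4 sampling is configured on the `Tracer` instead, which is one plausible way the TODO could be resolved. A hedged sketch follows: `AlwaysSampler` and `Tracer.SAMPLER_CLASSES_KEY` appear elsewhere in this patch, while `ProbabilitySampler` and its "sampler.fraction" key are assumed from htrace-core4 and should be verified against the library.

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.apache.htrace.core.AlwaysSampler;
import org.apache.htrace.core.ProbabilitySampler;
import org.apache.htrace.core.Tracer;

public class SamplerConfigSketch {
  public static Tracer buildTracer(Configuration conf, double traceRate) {
    if (traceRate >= 1.0) {
      conf.setStrings(HBaseHTraceConfiguration.KEY_PREFIX + Tracer.SAMPLER_CLASSES_KEY,
          AlwaysSampler.class.getName());
    } else if (traceRate > 0.0) {
      conf.setStrings(HBaseHTraceConfiguration.KEY_PREFIX + Tracer.SAMPLER_CLASSES_KEY,
          ProbabilitySampler.class.getName());
      conf.setDouble(HBaseHTraceConfiguration.KEY_PREFIX + "sampler.fraction", traceRate);
    } // else: no sampler configured, so no spans are sampled
    return new Tracer.Builder().name("Client")
        .conf(new HBaseHTraceConfiguration(conf)).build();
  }
}
----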
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index e6bde4e..fec537b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -20,6 +20,15 @@ package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -35,14 +44,13 @@ import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.junit.AfterClass;
@@ -51,15 +59,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-
 @Category({MediumTests.class, ClientTests.class})
 public class TestReplicaWithCluster {
   private static final Log LOG = LogFactory.getLog(TestReplicaWithCluster.class);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
index 65dff1b..0225590 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
@@ -21,66 +21,82 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import java.lang.reflect.Method;
 import java.util.Collection;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.TraceTree;
-import org.apache.htrace.impl.POJOSpanReceiver;
+import org.apache.htrace.core.AlwaysSampler;
+import org.apache.htrace.core.POJOSpanReceiver;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.StandardOutSpanReceiver;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.htrace.core.TracerId;
+import org.apache.htrace.core.TracerPool;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Multimap;
-
 @Category({MiscTests.class, MediumTests.class})
 public class TestHTraceHooks {
 
   private static final byte[] FAMILY_BYTES = "family".getBytes();
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static POJOSpanReceiver rcvr;
-  private static long ROOT_SPAN_ID = 0;
+  private static SpanId ROOT_SPAN_ID = new SpanId(0, 0);
 
   @BeforeClass
   public static void before() throws Exception {
-
-    // Find out what the right value to use fo SPAN_ROOT_ID after HTRACE-111. We use HTRACE-32
-    // to find out to detect if we are using HTrace 3.2 or not.
-    try {
-      Method m = Span.class.getMethod("addKVAnnotation", String.class, String.class);
-    } catch (NoSuchMethodException e) {
-      ROOT_SPAN_ID = 0x74aceL; // Span.SPAN_ROOT_ID pre HTrace-3.2
-    }
-
-    TEST_UTIL.startMiniCluster(2, 3);
-    rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
-    Trace.addReceiver(rcvr);
+    MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster(2, 3);
+    HBaseHTraceConfiguration htraceConf =
+      new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration());
+    rcvr = new POJOSpanReceiver(htraceConf);
+    TracerPool.getGlobalTracerPool().loadReceiverType(POJOSpanReceiver.class.getName(), htraceConf,
+      cluster.getClass().getClassLoader());
   }
 
   @AfterClass
   public static void after() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
-    Trace.removeReceiver(rcvr);
-    rcvr = null;
+  }
+
+  @Test
+  public void testBasicLoadTracer() {
+    Configuration conf = new Configuration();
+    conf.setStrings(HBaseHTraceConfiguration.KEY_PREFIX + Tracer.SPAN_RECEIVER_CLASSES_KEY,
+      StandardOutSpanReceiver.class.getName());
+    conf.setStrings(HBaseHTraceConfiguration.KEY_PREFIX + Tracer.SAMPLER_CLASSES_KEY,
+      AlwaysSampler.class.getName());
+    conf.set(HBaseHTraceConfiguration.KEY_PREFIX + TracerId.TRACER_ID_KEY,
+      "%{tname}," + "ANYTHING");
+    try (Tracer tracer =
+        new Tracer.Builder().name("test").conf(new HBaseHTraceConfiguration(conf)).build()) {
+      TraceScope scope = tracer.newScope("First");
+      scope.addKVAnnotation("hello", "world");
+      Span span = scope.getSpan().child("child");
+      System.out.println("Is running=" + span.isRunning());
+      TraceScope subScope = tracer.newScope("Second");
+      TraceScope subSubScope = tracer.newScope("Third");
+      subSubScope.close();
+      subScope.close();
+      scope.close();
+    }
   }
 
   @Test
   public void testTraceCreateTable() throws Exception {
-    TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
+    TraceScope tableCreationSpan = Tracer.curThreadTracer().newScope("creating table");
     Table table;
     try {
-
       table = TEST_UTIL.createTable(TableName.valueOf("table"), FAMILY_BYTES);
     } finally {
@@ -92,11 +108,13 @@ public class TestHTraceHooks {
     TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return rcvr.getSpans().size() >= 5;
+        return ((POJOSpanReceiver)TracerPool.getGlobalTracerPool().getReceivers()[0]).
+          getSpans().size() >= 5;
       }
     });
 
-    Collection<Span> spans = rcvr.getSpans();
+    Collection<Span> spans =
+      ((POJOSpanReceiver)TracerPool.getGlobalTracerPool().getReceivers()[0]).getSpans();
     TraceTree traceTree = new TraceTree(spans);
     Collection<Span> roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
@@ -116,11 +134,11 @@ public class TestHTraceHooks {
     assertTrue(createTableCount >= 1);
     assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3);
     assertTrue(spans.size() > 5);
-    
+
     Put put = new Put("row".getBytes());
     put.add(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
 
-    TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
+    TraceScope putSpan = Tracer.curThreadTracer().newScope("doing put");
     try {
       table.put(put);
     } finally {
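The updated test no longer holds a receiver across `Trace.addReceiver`/`removeReceiver`; it registers the receiver on the process-wide `TracerPool` in `before()` and fetches it back when asserting. The lookup the test repeats can be read as the following helper (a sketch; the single-receiver assumption matches what `before()` sets up):

[source,java]
----
import java.util.Collection;

import org.apache.htrace.core.POJOSpanReceiver;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TracerPool;

public class ReceiverLookupSketch {
  /** Returns the spans collected by the first (only) receiver in the global pool. */
  static Collection<Span> collectedSpans() {
    return ((POJOSpanReceiver) TracerPool.getGlobalTracerPool().getReceivers()[0]).getSpans();
  }
}
----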
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
new file mode 100644
index 0000000..9231b86
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.htrace.core.MilliSpan;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+
+/**
+ * Used to create the graph formed by spans. Used to be in htrace 3.2. Copied over here for tests
+ * for htrace 4.0. Remove when htrace 4.0 has better test facility that includes what this class
+ * does.
+ */
+public class TraceTree {
+  public static final Log LOG = LogFactory.getLog(TraceTree.class);
+
+
+  public static class SpansByParent {
+    /**
+     * Compare two spans by span ID.
+     */
+    private static Comparator<Span> COMPARATOR =
+        new Comparator<Span>() {
+          @Override
+          public int compare(Span a, Span b) {
+            return a.getSpanId().compareTo(b.getSpanId());
+          }
+        };
+
+    private final TreeSet<Span> treeSet;
+
+    private final HashMap<SpanId, LinkedList<Span>> parentToSpans;
+
+    SpansByParent(Collection<Span> spans) {
+      TreeSet<Span> treeSet = new TreeSet<Span>(COMPARATOR);
+      parentToSpans = new HashMap<SpanId, LinkedList<Span>>();
+      for (Span span : spans) {
+        treeSet.add(span);
+        for (SpanId parent : span.getParents()) {
+          LinkedList<Span> list = parentToSpans.get(parent);
+          if (list == null) {
+            list = new LinkedList<Span>();
+            parentToSpans.put(parent, list);
+          }
+          list.add(span);
+        }
+        if (span.getParents().length == 0) {
+          LinkedList<Span> list = parentToSpans.get(SpanId.INVALID);
+          if (list == null) {
+            list = new LinkedList<Span>();
+            parentToSpans.put(SpanId.INVALID, list);
+          }
+          list.add(span);
+        }
+      }
+      this.treeSet = treeSet;
+    }
+
+    public List<Span> find(SpanId parentId) {
+      LinkedList<Span> spans = parentToSpans.get(parentId);
+      if (spans == null) {
+        return new LinkedList<Span>();
+      }
+      return spans;
+    }
+
+    public Iterator<Span> iterator() {
+      return Collections.unmodifiableSortedSet(treeSet).iterator();
+    }
+  }
+
+  public static class SpansByProcessId {
+    /**
+     * Compare two spans by process ID, and then by span ID.
+     */
+    private static Comparator<Span> COMPARATOR =
+        new Comparator<Span>() {
+          @Override
+          public int compare(Span a, Span b) {
+            int cmp = a.getTracerId().compareTo(b.getTracerId());
+            if (cmp != 0) {
+              return cmp;
+            }
+            return a.getSpanId().compareTo(b.getSpanId());
+          }
+        };
+
+    private final TreeSet<Span> treeSet;
+
+    SpansByProcessId(Collection<Span> spans) {
+      TreeSet<Span> treeSet = new TreeSet<Span>(COMPARATOR);
+      for (Span span : spans) {
+        treeSet.add(span);
+      }
+      this.treeSet = treeSet;
+    }
+
+    public List<Span> find(String processId) {
+      List<Span> spans = new ArrayList<Span>();
+      Span span = new MilliSpan.Builder().
+          spanId(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE)).
+          tracerId(processId).
+          build();
+      while (true) {
+        span = treeSet.higher(span);
+        if (span == null) {
+          break;
+        }
+        if (!span.getTracerId().equals(processId)) {
+          break;
+        }
+        spans.add(span);
+      }
+      return spans;
+    }
+
+    public Iterator<Span> iterator() {
+      return Collections.unmodifiableSortedSet(treeSet).iterator();
+    }
+  }
+
+  private final SpansByParent spansByParent;
+  private final SpansByProcessId spansByProcessId;
+
+  /**
+   * Create a new TraceTree
+   *
+   * @param spans The collection of spans to use to create this TraceTree. Should
+   *              have at least one root span.
+   */
+  public TraceTree(Collection<Span> spans) {
+    this.spansByParent = new SpansByParent(spans);
+    this.spansByProcessId = new SpansByProcessId(spans);
+  }
+
+  public SpansByParent getSpansByParent() {
+    return spansByParent;
+  }
+
+  public SpansByProcessId getSpansByProcessId() {
+    return spansByProcessId;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder bld = new StringBuilder();
+    String prefix = "";
+    for (Iterator<Span> iter = spansByParent.iterator(); iter.hasNext();) {
+      Span span = iter.next();
+      bld.append(prefix).append(span.toString());
+      prefix = "\n";
+    }
+    return bld.toString();
+  }
+}
\ No newline at end of file
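A short usage sketch for the TraceTree helper above (span values are illustrative; `MilliSpan.Builder` methods beyond those used in this patch are assumed from htrace-core4). Parentless spans are filed under `SpanId.INVALID`, which is why TestHTraceHooks seeds `ROOT_SPAN_ID` with `new SpanId(0, 0)`:

[source,java]
----
import java.util.Arrays;

import org.apache.hadoop.hbase.trace.TraceTree;
import org.apache.htrace.core.MilliSpan;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;

public class TraceTreeSketch {
  public static void main(String[] args) {
    Span root = new MilliSpan.Builder()
        .spanId(new SpanId(1, 1)).description("root").tracerId("t1").build();
    Span child = new MilliSpan.Builder()
        .spanId(new SpanId(1, 2)).parents(new SpanId[] { new SpanId(1, 1) })
        .description("child").tracerId("t1").build();
    TraceTree tree = new TraceTree(Arrays.asList(root, child));
    // Parentless spans are keyed under SpanId.INVALID, i.e. the (0, 0) id.
    System.out.println("roots: " + tree.getSpansByParent().find(SpanId.INVALID).size());
    // Children are looked up by their parent's SpanId.
    System.out.println("children of root: "
        + tree.getSpansByParent().find(new SpanId(1, 1)).size());
  }
}
----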
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
index 28c68b3..396d449 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
@@ -19,19 +19,16 @@ package org.apache.hadoop.hbase.wal;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
@@ -46,36 +43,30 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MockRegionServerServices;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LogRoller;
-import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.wal.WALProvider.Writer;
-import org.apache.hadoop.hbase.wal.WAL;
+// imports for things that haven't moved from regionserver.wal yet.
+import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogReader;
+import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
+import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WALProvider.Writer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.HTraceConfiguration;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 import com.yammer.metrics.core.Histogram;
 import com.yammer.metrics.core.Meter;
 import com.yammer.metrics.core.MetricsRegistry;
 import com.yammer.metrics.reporting.ConsoleReporter;
 
-// imports for things that haven't moved from regionserver.wal yet.
-import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogReader;
-import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
-import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-
 /**
  * This class runs performance benchmarks for {@link WAL}.
  * See usage for this tool by running:
@@ -127,7 +118,6 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     private final HRegion region;
     private final int syncInterval;
     private final HTableDescriptor htd;
-    private final Sampler loopSampler;
 
     WALPutBenchmark(final HRegion region, final HTableDescriptor htd,
         final long numIterations, final boolean noSync, final int syncInterval,
@@ -138,6 +128,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       this.numFamilies = htd.getColumnFamilies().length;
       this.region = region;
      this.htd = htd;
+      /* TODO
       String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
       if (spanReceivers == null || spanReceivers.isEmpty()) {
         loopSampler = Sampler.NEVER;
@@ -155,7 +146,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
           getConf().setDouble("hbase.sampler.fraction", traceFreq);
           loopSampler = new ProbabilitySampler(new HBaseHTraceConfiguration(getConf()));
         }
-      }
+      }*/
     }
 
     @Override
@@ -166,13 +157,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       WAL wal = region.getWAL();
       TraceScope threadScope =
-        Trace.startSpan("WALPerfEval." + Thread.currentThread().getName());
+        Tracer.curThreadTracer().newScope("WALPerfEval." + Thread.currentThread().getName());
       try {
         long startTime = System.currentTimeMillis();
         int lastSync = 0;
         for (int i = 0; i < numIterations; ++i) {
-          assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
-          TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
+          TraceScope loopScope = Tracer.curThreadTracer().newScope("runLoopIter" + i);
           try {
             long now = System.nanoTime();
             Put put = setupPut(rand, key, value, numFamilies);
@@ -305,10 +295,11 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       FSUtils.setFsDefault(getConf(), FSUtils.getRootDir(getConf()));
       FileSystem fs = FileSystem.get(getConf());
       LOG.info("FileSystem: " + fs);
-
+      /*
       SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
       final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
-      TraceScope scope = Trace.startSpan("WALPerfEval", sampler);
+      */
+      Tracer tracer = Tracer.curThreadTracer();
+      TraceScope scope = tracer == null? null: tracer.newScope("WALPerfEval");
 
       try {
         if (rootRegionDir == null) {
@@ -330,8 +321,10 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
           // a table per desired region means we can avoid carving up the key space
           final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
           regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
-          benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
-            syncInterval, traceFreq));
+
+          Runnable benchmark = new WALPutBenchmark(regions[i], htd, numIterations, noSync,
+            syncInterval, traceFreq);
+          benchmarks[i] = tracer == null? benchmark: tracer.wrap(benchmark, "put sync=" + noSync);
@@ -380,7 +373,6 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
         // We may be called inside a test that wants to keep on using the fs.
         if (!noclosefs) fs.close();
-        scope.close();
-        if (receiverHost != null) receiverHost.closeReceivers();
+        if (scope != null) scope.close();
       }
 
       return(0);
diff --git a/pom.xml b/pom.xml
index 46cd59e..7682ab2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1237,7 +1237,7 @@
    <jruby.version>1.6.8</jruby.version>
    <junit.version>4.12</junit.version>
    <hamcrest.version>1.3</hamcrest.version>
-    <htrace.version>3.1.0-incubating</htrace.version>
+    <htrace.version>4.0.0-incubating</htrace.version>
    <log4j.version>1.2.17</log4j.version>
    <mockito-all.version>1.10.8</mockito-all.version>
    <protobuf.version>2.5.0</protobuf.version>
diff --git a/src/main/asciidoc/_chapters/tracing.adoc b/src/main/asciidoc/_chapters/tracing.adoc
index 9b3711e..f634535 100644
--- a/src/main/asciidoc/_chapters/tracing.adoc
+++ b/src/main/asciidoc/_chapters/tracing.adoc
@@ -123,26 +123,18 @@ into:
 
 [source,java]
 ----
-TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
+Tracer tracer = Tracer.curThreadTracer();
+TraceScope ts = tracer == null? null: tracer.newScope("Gets");
 try {
   Table table = connection.getTable(TableName.valueOf("t1"));
   Get get = new Get(Bytes.toBytes("r1"));
   Result res = table.get(get);
 } finally {
-  ts.close();
+  if (ts != null) ts.close();
 }
 ----
 
-If you wanted to trace half of your 'get' operations, you would pass in:
-
-[source,java]
-----
-
-new ProbabilitySampler(0.5)
-----
-
-in lieu of `Sampler.ALWAYS` to `Trace.startSpan()`.
-See the HTrace _README_ for more information on Samplers.
+See the HTrace documentation for more information on how samplers and span receivers are
+configured in HTrace 4.
 
 [[tracing.client.shell]]
 == Tracing from HBase Shell
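For background work handed to another thread, the chapter could also show the wrap pattern this patch applies in WALPerformanceEvaluation. A sketch in the same style follows; `executor` stands for any `ExecutorService` and the task name is arbitrary.

[source,java]
----
Tracer tracer = Tracer.curThreadTracer();
Runnable task = new Runnable() {
  @Override
  public void run() {
    // traced work happens here; the wrapped span is a child of the submitting span
  }
};
executor.execute(tracer != null? tracer.wrap(task, "myTask"): task);
----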