diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index f947a3acb9..53e94d55cc 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -87,6 +87,8 @@ public interface MetricsMasterSource extends BaseSource { String SERVER_CRASH_METRIC_PREFIX = "serverCrash"; + String SERVER_ABORT_PREFIX = "rsabort."; + /** * Increment the number of requests the cluster has seen. * @@ -98,4 +100,6 @@ public interface MetricsMasterSource extends BaseSource { * @return {@link OperationMetrics} containing common metrics for server crash operation */ OperationMetrics getServerCrashMetrics(); + + void addRsFatal(String msg); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java index 53d3d18039..b157f1b943 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java @@ -41,6 +41,8 @@ public interface ExceptionTrackingSource extends BaseSource { "rest of the requests will have to be retried."; String EXCEPTIONS_CALL_QUEUE_TOO_BIG = "exceptions.callQueueTooBig"; String EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC = "Call queue is full"; + String EXCEPTIONS_CALL_TIMEOUT = "exceptions.callTimeout"; + String EXCEPTIONS_NOT_RUNNING_YET = "exceptions.notRunningYet"; void exception(); @@ -56,4 +58,6 @@ public interface ExceptionTrackingSource extends BaseSource { void tooBusyException(); void multiActionTooLargeException(); void callQueueTooBigException(); + void callTimeoutException(); + void notRunningYetException(); } 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 6d9ce54486..e0f36d9a0f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -220,6 +220,9 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo */ void updateCompactionOutputSize(boolean isMajor, long bytes); + /** Adds an exception from DFS to the exception metrics. */ + void addDfsException(Throwable t); + // Strings used for exporting to metrics system. String REGION_COUNT = "regionCount"; String REGION_COUNT_DESC = "Number of regions"; @@ -556,4 +559,14 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String AVERAGE_REGION_SIZE = "averageRegionSize"; String AVERAGE_REGION_SIZE_DESC = "Average region size over the RegionServer including memstore and storefile sizes."; + + + String FS_EXCEPTION_PREFIX = "dfsexception."; + String FS_NETWORK_EXCEPTION = FS_EXCEPTION_PREFIX + "network"; + String FS_NETWORK_EXCEPTION_DESC = "Number of network-related errors from DFS calls"; + String FS_OTHER_IO_EXCEPTION = FS_EXCEPTION_PREFIX + "io"; + String FS_OTHER_IO_EXCEPTION_DESC = "Number of uncategorized IO errors from DFS calls"; + String FS_OTHER_EXCEPTION = FS_EXCEPTION_PREFIX + "other"; + String FS_OTHER_EXCEPTION_DESC = + "Number of uncategorized (non-interrupted) errors from DFS calls"; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index 03ebc4cbd7..74b0ab24a0 100644 --- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -523,4 +523,8 @@ public interface MetricsRegionServerWrapper { long getTrailerHitCount(); long getTotalRowActionRequestCount(); + + String getConfVar(String name, String defaultValue); + + boolean isNetworkException(Throwable t); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index fc49a40aad..1b9c2252a0 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.master; +import java.util.concurrent.atomic.AtomicLongArray; + import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.hbase.metrics.OperationMetrics; @@ -121,6 +123,11 @@ public class MetricsMasterSourceImpl .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) .tag(Interns.info(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC), String.valueOf(masterWrapper.getIsActiveMaster())); + for (RS_ABORT_TYPE type : RS_ABORT_TYPE.values()) { + String str = type.toString(); + metricsRecordBuilder.addCounter(Interns.info(SERVER_ABORT_PREFIX + str, str), + abortCounters.get(type.value)); + } } metricsRegistry.snapshot(metricsRecordBuilder, all); @@ -130,4 +137,47 @@ public class MetricsMasterSourceImpl public OperationMetrics getServerCrashMetrics() { return serverCrashMetrics; } + + // TODO: ideally we should change this to use dynamic registry like the RS metrics + private enum RS_ABORT_TYPE { + ioerror(0), zkerror(1), masterkill(2), mastererror(3), 
unknown(4), other(5); + + private final int value; + public final static int MAX_VALUE; + + RS_ABORT_TYPE(int i) { + this.value = i; + } + + static { + int max = Integer.MIN_VALUE; + for (RS_ABORT_TYPE ev : RS_ABORT_TYPE.values()) { + max = Math.max(ev.value, max); + } + MAX_VALUE = max; + } + } + + private final AtomicLongArray abortCounters = new AtomicLongArray(RS_ABORT_TYPE.MAX_VALUE + 1); + + @Override + public void addRsFatal(String msg) { + // The API for server crashes is a string, so for now try to classify based on substrings. + msg = msg == null ? "" : msg.toLowerCase(); + RS_ABORT_TYPE type = RS_ABORT_TYPE.other; + if (msg.contains("droppedsnapshot") || msg.contains("timeoutioexception")) { + type = RS_ABORT_TYPE.ioerror; + } else if (msg.contains("keeperexception")) { + type = RS_ABORT_TYPE.zkerror; + } else if (msg.contains("youaredead")) { + type = RS_ABORT_TYPE.masterkill; + } else if (msg.contains("failed to report") && msg.contains("to master")) { + type = RS_ABORT_TYPE.mastererror; + } else if (msg.contains("regionserverabortedexception")) { + type = RS_ABORT_TYPE.unknown; // The original reason is actually lost; should not happen. 
+ } + abortCounters.incrementAndGet(type.value); + } } + + diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java index 3af27d89b9..a1d2c1f18d 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java @@ -38,6 +38,8 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl protected MutableFastCounter exceptionsMoved; protected MutableFastCounter exceptionsMultiTooLarge; protected MutableFastCounter exceptionsCallQueueTooBig; + protected MutableFastCounter exceptionsCallTimeout; + protected MutableFastCounter exceptionsNotRunningYet; public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { @@ -66,6 +68,10 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); this.exceptionsCallQueueTooBig = this.getMetricsRegistry() .newCounter(EXCEPTIONS_CALL_QUEUE_TOO_BIG, EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC, 0L); + this.exceptionsCallTimeout = this.getMetricsRegistry() + .newCounter(EXCEPTIONS_CALL_TIMEOUT, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsNotRunningYet = this.getMetricsRegistry() + .newCounter(EXCEPTIONS_NOT_RUNNING_YET, EXCEPTIONS_TYPE_DESC, 0L); } @Override @@ -117,4 +123,14 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl public void callQueueTooBigException() { exceptionsCallQueueTooBig.incr(); } + + @Override + public void callTimeoutException() { + exceptionsCallTimeout.incr(); + } + + @Override + public void notRunningYetException() { + exceptionsNotRunningYet.incr(); + } } diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 58c42a5583..1425fa060e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -20,12 +20,19 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; +import java.io.InterruptedIOException; +import java.io.IOException; +import java.nio.channels.ClosedByInterruptException; +import java.util.HashMap; +import java.util.Map; + /** * Hadoop2 implementation of MetricsRegionServerSource. 
* @@ -34,7 +41,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsRegionServerSourceImpl extends BaseSourceImpl implements MetricsRegionServerSource { - final MetricsRegionServerWrapper rsWrap; + private final MetricsRegionServerWrapper rsWrap; private final MetricHistogram putHisto; private final MetricHistogram putBatchHisto; @@ -90,6 +97,20 @@ public class MetricsRegionServerSourceImpl private final MetricHistogram pausesWithGc; private final MetricHistogram pausesWithoutGc; + private final MutableFastCounter fsNetworkErrors; + private final MutableFastCounter fsOtherIoException; + private final MutableFastCounter fsOtherException; + + private final Map<String, MutableFastCounter> fsCustomExceptions = new HashMap<>(); + + private static final String CUSTOM_EXCEPTION_METRICS_STRING = + "hbase.metrics.custom.exception.metrics"; + private static final String DEFAULT_CUSTOM_EXCEPTION_METRICS_STRING = + "could only be replicated to,replicaWrite;enough number of replicas,replicaWrite;" + + "NotReplicatedYetException,replicaWrite;Could not get block locations,getBlockLocations;" + + "TimeoutIOException,timeoutIo"; + + public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); } @@ -177,6 +198,33 @@ WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); + + fsNetworkErrors = getMetricsRegistry().newCounter( + FS_NETWORK_EXCEPTION, FS_NETWORK_EXCEPTION_DESC, 0L); + fsOtherIoException = getMetricsRegistry().newCounter( + FS_OTHER_IO_EXCEPTION, FS_OTHER_IO_EXCEPTION_DESC, 0L); + fsOtherException = getMetricsRegistry().newCounter( + FS_OTHER_EXCEPTION, FS_OTHER_EXCEPTION_DESC, 0L); + + String customExceptionMetrics = (rsWrap == null) ?
null : rsWrap.getConfVar( + CUSTOM_EXCEPTION_METRICS_STRING, DEFAULT_CUSTOM_EXCEPTION_METRICS_STRING); + if (customExceptionMetrics != null && !customExceptionMetrics.isEmpty()) { + Map<String, MutableFastCounter> counterByName = new HashMap<>(); + String[] entries = customExceptionMetrics.split(";", -1); + for (String entry : entries) { + String[] strAndName = entry.split(",", -1); + if (strAndName.length != 2) { + continue; + } + String name = FS_EXCEPTION_PREFIX + strAndName[1]; + MutableFastCounter counter = counterByName.get(name); + if (counter == null) { + counter = getMetricsRegistry().newCounter(name, name, 0L); + counterByName.put(name, counter); + } + fsCustomExceptions.put(strAndName[0], counter); + } + } } @Override @@ -595,4 +642,49 @@ public void updatePutBatch(long t) { putBatchHisto.add(t); } + + @Override + public void addDfsException(Throwable t) { + boolean hasIoException = false; + while (t != null) { + if (rsWrap != null && rsWrap.isNetworkException(t)) { // Some tests pass in null. + fsNetworkErrors.incr(); + return; + } + if (t instanceof InterruptedException || t instanceof ClosedByInterruptException + || t instanceof InterruptedIOException) { + return; // Don't report interrupted exceptions unless they are also network errors. + } + if (t instanceof IOException) { + hasIoException = true; + if (t instanceof RemoteException) { + Throwable oldT = t; + t = ((RemoteException)t).unwrapRemoteException(); + if (oldT == t) { // Couldn't get the nested exception; cause is probably null. + t = t.getCause(); + } + continue; + } + } + if (!fsCustomExceptions.isEmpty()) { + String name = t.getClass().getSimpleName(), str = t.getLocalizedMessage(); + // We don't expect this structure to be big; or the exceptions to happen on critical path.
+ for (Map.Entry<String, MutableFastCounter> e : fsCustomExceptions.entrySet()) { + String key = e.getKey(); + if (name.contains(key) || (str != null && str.contains(key))) { + e.getValue().incr(); + return; + } + } + } + + t = t.getCause(); + } + + if (hasIoException) { + fsOtherIoException.incr(); + } else { + fsOtherException.incr(); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index 9d5373ce04..855619b568 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.CallQueueTooBigException; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.yetus.audience.InterfaceAudience; @@ -116,6 +117,11 @@ public class MetricsHBaseServer { source.multiActionTooLargeException(); } else if (throwable instanceof CallQueueTooBigException) { source.callQueueTooBigException(); + } else if (throwable instanceof CallTimeoutException) { + source.callTimeoutException(); + } else if (throwable instanceof PleaseHoldException + || throwable instanceof ServerNotRunningYetException) { + source.notRunningYetException(); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 1ff3f0eaa7..93151a3dc3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -553,6 +553,8 @@ public class MasterRpcServices
extends RSRpcServices ServerName sn = ProtobufUtil.toServerName(request.getServer()); String msg = sn + " reported a fatal error:\n" + errorText; LOG.warn(msg); + // Note: ideally this API should be improved so we don't have to do string parsing. + master.getMasterMetrics().addRsFatal(errorText); master.rsFatals.add(msg); return ReportRSFatalErrorResponse.newBuilder().build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java index 83a6988006..a298f0eb55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java @@ -177,4 +177,8 @@ public class MetricsMaster { public void incrementSnapshotFetchTime(long executionTime) { masterQuotaSource.incrementSnapshotObserverSnapshotFetchTime(executionTime); } + + public void addRsFatal(String msg) { + masterSource.addRsFatal(msg); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index fbf73f36ee..ac552470d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -642,6 +642,11 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati } catch (IOException ex) { IOException remoteEx = ex instanceof RemoteException ? 
((RemoteException) ex).unwrapRemoteException() : ex; + RegionServerServices rss = this.region.getRegionServerServices(); + if (rss != null && rss.getMetrics() != null) { + rss.getMetrics().addDfsException(remoteEx); + } + LOG.error("Compaction failed " + this, remoteEx); if (remoteEx != ex) { LOG.info("Compaction failed at original callstack: " + formatStackTrace(ex)); @@ -650,6 +655,11 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati server.checkFileSystem(); } catch (Exception ex) { LOG.error("Compaction failed " + this, ex); + RegionServerServices rss = this.region.getRegionServerServices(); + if (rss != null && rss.getMetrics() != null) { + rss.getMetrics().addDfsException(ex); + } + region.reportCompactionRequestFailure(); server.checkFileSystem(); } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 5056ad7c1d..c78bf67b0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1038,6 +1038,10 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } } } catch (IOException e) { + RegionServerServices rss = this.region.getRegionServerServices(); + if (rss != null && rss.getMetrics() != null) { + rss.getMetrics().addDfsException(e); + } LOG.warn("Failed flushing store file, retrying num={}", i, e); lastException = e; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 21534cedb1..71adeec98b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -248,4 +248,8 @@ 
public class MetricsRegionServer { public void incrementRegionSizeReportingChoreTime(long time) { quotaSource.incrementRegionSizeReportingChoreTime(time); } + + public void addDfsException(Throwable t) { + this.serverSource.addDfsException(t); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 33a6ee0ee2..4e45265806 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.net.SocketTimeoutException; +import java.nio.channels.ClosedChannelException; import java.util.Collection; import java.util.List; import java.util.Optional; @@ -47,6 +49,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.channel.ConnectTimeoutException; + /** * Impl for exposing HRegionServer Information through Hadoop's metrics 2 system. */ @@ -1006,4 +1010,17 @@ class MetricsRegionServerWrapperImpl public long getTrailerHitCount() { return this.cacheStats.map(CacheStats::getTrailerHitCount).orElse(0L); } + + @Override + public String getConfVar(String name, String defaultValue) { + return regionServer.getConfiguration().get(name, defaultValue); + } + + @Override + public boolean isNetworkException(Throwable t) { + // Here to avoid netty dependencies in compat. 
+ return (t instanceof SocketTimeoutException || t instanceof ClosedChannelException + || t instanceof ConnectTimeoutException); + } } + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java index b003b44524..07f9ce09e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java @@ -579,4 +579,14 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe public long getAverageRegionSize() { return 10000000; } + + @Override + public String getConfVar(String name, String defaultValue) { + return defaultValue; + } + + @Override + public boolean isNetworkException(Throwable t) { + return false; + } } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java index 8c4c6f08d2..2bd3dec47b 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.CallQueueTooBigException; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; @@ -31,6 +32,8 @@ import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import 
org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.exceptions.ScannerResetException; +import org.apache.hadoop.hbase.ipc.CallTimeoutException; +import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.thrift.generated.IOError; import org.apache.hadoop.hbase.thrift2.generated.TIOError; import org.apache.yetus.audience.InterfaceAudience; @@ -145,6 +148,11 @@ public class ThriftMetrics { source.multiActionTooLargeException(); } else if (throwable instanceof CallQueueTooBigException) { source.callQueueTooBigException(); + } else if (throwable instanceof CallTimeoutException) { + source.callTimeoutException(); + } else if (throwable instanceof PleaseHoldException + || throwable instanceof ServerNotRunningYetException) { + source.notRunningYetException(); } } }