From b841ae35a6ae94ce5b174cc198e11aa04b6eb048 Mon Sep 17 00:00:00 2001
From: Elliott Clark
Date: Thu, 18 Feb 2016 09:54:05 -0800
Subject: [PATCH] HBASE-15222 Use HDR histograms rather than hadoop or yammer's

Summary:
Use less contended constructs for metrics. For histograms, which were the
largest culprit, we now use HdrHistogram's histograms. For AtomicLongs,
we now use Counter where possible.

Test Plan: unit tests

Differential Revision: https://reviews.facebook.net/D54381
---
 .../hadoop/hbase/util/FastLongHistogram.java       |   1 +
 .../apache/hadoop/hbase/metrics/BaseSource.java    |   8 --
 .../apache/hadoop/metrics2/MetricHistogram.java    |   2 +
 hbase-hadoop2-compat/pom.xml                       |   8 +-
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java    |  11 +-
 .../master/MetricsAssignmentManagerSourceImpl.java |   5 +-
 .../master/MetricsMasterFilesystemSourceImpl.java  |   9 +-
 .../hbase/master/MetricsSnapshotSourceImpl.java    |   8 +-
 .../master/balancer/MetricsBalancerSourceImpl.java |   4 +-
 .../hadoop/hbase/metrics/BaseSourceImpl.java       |   7 -
 .../regionserver/MetricsRegionSourceImpl.java      |   6 +-
 .../thrift/MetricsThriftServerSourceImpl.java      |  11 +-
 .../metrics2/lib/DynamicMetricsRegistry.java       |  34 -----
 .../metrics2/lib/MetricMutableQuantiles.java       | 154 ---------------------
 .../hadoop/metrics2/lib/MetricsExecutorImpl.java   |   2 +-
 .../hadoop/metrics2/lib/MutableHistogram.java      | 118 ++++------------
 .../hadoop/metrics2/lib/MutableRangeHistogram.java |  22 +--
 hbase-server/pom.xml                               |   4 +
 .../tmpl/regionserver/BlockCacheViewTmpl.jamon     |   1 -
 .../apache/hadoop/hbase/io/hfile/AgeSnapshot.java  |  34 ++---
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java      |  28 ++--
 .../apache/hadoop/hbase/io/hfile/CacheStats.java   |  58 ++++----
 .../org/apache/hadoop/hbase/io/hfile/HFile.java    |   9 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     |   4 +-
 .../hbase/io/hfile/bucket/BucketCacheStats.java    |   9 +-
 .../hbase/regionserver/StoreFileScanner.java       |  13 +-
 pom.xml                                            |   6 +
 28 files changed, 166 insertions(+), 412 deletions(-)
 delete mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
index 623cbdb..696380b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
+@Deprecated
 public class FastLongHistogram {
   /**
    * Bins is a class containing a list of buckets(or bins) for estimation histogram of some data.
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
index 3ab783a..f79aa9f 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
@@ -80,14 +80,6 @@ public interface BaseSource {

   /**
-   * Add some value to a Quantile (An accurate histogram).
-   *
-   * @param name the name of the quantile
-   * @param value the value to add to the quantile
-   */
-  void updateQuantile(String name, long value);
-
-  /**
    * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string.
    * eg. regionserver, master, thriftserver
    *
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
index b759efb..1aafb57 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
@@ -33,7 +33,9 @@ public interface MetricHistogram {
   String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile";
   String NINETIETH_PERCENTILE_METRIC_NAME = "_90th_percentile";
   String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile";
+  String NINETY_EIGHTH_PERCENTILE_METRIC_NAME = "_98th_percentile";
   String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile";
+  String NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME = "_99.9th_percentile";

   /**
    * Add a single value to a histogram's stream of values.
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 100a297..7bc6d66 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -162,6 +162,10 @@ limitations under the License.
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
@@ -182,8 +186,8 @@ limitations under the License.
       <version>${hadoop-two.version}</version>
     </dependency>
     <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
+      <groupId>org.hdrhistogram</groupId>
+      <artifactId>HdrHistogram</artifactId>
     </dependency>
     <dependency>
       <groupId>commons-lang</groupId>
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
index 487f9f5..2e1e44a 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.ipc;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.lib.Interns;
@@ -51,11 +52,11 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
   private final MutableCounterLong exceptionsMultiTooLarge;

-  private MutableHistogram queueCallTime;
-  private MutableHistogram processCallTime;
-  private MutableHistogram totalCallTime;
-  private MutableHistogram requestSize;
-  private MutableHistogram responseSize;
+  private MetricHistogram queueCallTime;
+  private MetricHistogram processCallTime;
+  private MetricHistogram totalCallTime;
+  private MetricHistogram requestSize;
+  private MetricHistogram responseSize;

   public MetricsHBaseServerSourceImpl(String metricsName,
                                       String metricsDescription,
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java
index ccf1c1d..59e957c 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableHistogram;

@@ -29,8 +30,8 @@ public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl implement
   private MutableGaugeLong ritGauge;
   private MutableGaugeLong ritCountOverThresholdGauge;
   private MutableGaugeLong ritOldestAgeGauge;
-  private MutableHistogram assignTimeHisto;
-  private MutableHistogram bulkAssignTimeHisto;
+  private MetricHistogram assignTimeHisto;
+  private MetricHistogram bulkAssignTimeHisto;

   public MetricsAssignmentManagerSourceImpl() {
     this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java
index 28414ea..bfd7bb0 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java
@@ -20,15 +20,16 @@ package org.apache.hadoop.hbase.master;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.lib.MutableHistogram;

 @InterfaceAudience.Private
 public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl implements MetricsMasterFileSystemSource {

-  private MutableHistogram splitSizeHisto;
-  private MutableHistogram splitTimeHisto;
-  private MutableHistogram metaSplitTimeHisto;
-  private MutableHistogram metaSplitSizeHisto;
+  private MetricHistogram splitSizeHisto;
+  private MetricHistogram splitTimeHisto;
+  private MetricHistogram metaSplitTimeHisto;
+  private MetricHistogram metaSplitSizeHisto;

   public MetricsMasterFilesystemSourceImpl() {
     this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java
index c2fc6b9..bcefda0 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java
@@ -20,14 +20,14 @@ package org.apache.hadoop.hbase.master;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
-import org.apache.hadoop.metrics2.lib.MutableHistogram;
+import org.apache.hadoop.metrics2.MetricHistogram;

 @InterfaceAudience.Private
 public class MetricsSnapshotSourceImpl extends BaseSourceImpl implements MetricsSnapshotSource {

-  private MutableHistogram snapshotTimeHisto;
-  private MutableHistogram snapshotCloneTimeHisto;
-  private MutableHistogram snapshotRestoreTimeHisto;
+  private MetricHistogram snapshotTimeHisto;
+  private MetricHistogram snapshotCloneTimeHisto;
+  private MetricHistogram snapshotRestoreTimeHisto;

   public MetricsSnapshotSourceImpl() {
     this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
index da34df2..4e1cf4b 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
@@ -20,13 +20,13 @@ package org.apache.hadoop.hbase.master.balancer;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableHistogram;

 @InterfaceAudience.Private
 public class MetricsBalancerSourceImpl extends BaseSourceImpl implements MetricsBalancerSource{

-  private MutableHistogram blanceClusterHisto;
+  private MetricHistogram blanceClusterHisto;
   private MutableCounterLong miscCount;

   public MetricsBalancerSourceImpl() {
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
index 6756a21..cf9dffb 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableHistogram;
@@ -132,12 +131,6 @@ public class BaseSourceImpl implements BaseSource, MetricsSource {
     histo.add(value);
   }

-  @Override
-  public void updateQuantile(String name, long value) {
-    MetricMutableQuantiles histo = metricsRegistry.getQuantile(name);
-    histo.add(value);
-  }
-
   /**
    * Remove a named gauge.
    *
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index fab6b51..f980bef 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -23,11 +23,11 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableHistogram;

 @InterfaceAudience.Private
 public class MetricsRegionSourceImpl implements MetricsRegionSource {
@@ -57,8 +57,8 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
   private final MutableCounterLong regionDelete;
   private final MutableCounterLong regionIncrement;
   private final MutableCounterLong regionAppend;
-  private final MutableHistogram regionGet;
-  private final MutableHistogram regionScanNext;
+  private final MetricHistogram regionGet;
+  private final MetricHistogram regionScanNext;
   private final int hashCode;

   public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
index f9612e5..2555391 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.thrift;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableHistogram;

@@ -32,12 +33,12 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram;
 public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
     MetricsThriftServerSource {

-  private MutableHistogram batchGetStat;
-  private MutableHistogram batchMutateStat;
-  private MutableHistogram queueTimeStat;
+  private MetricHistogram batchGetStat;
+  private MetricHistogram batchMutateStat;
+  private MetricHistogram queueTimeStat;

-  private MutableHistogram thriftCallStat;
-  private MutableHistogram thriftSlowCallStat;
+  private MetricHistogram thriftCallStat;
+  private MetricHistogram thriftSlowCallStat;

   private MutableGaugeLong callQueueLenGauge;
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
index ee13c76..5bb4dd9 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
@@ -326,19 +326,6 @@ public class DynamicMetricsRegistry {
     return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class);
   }

-  /**
-   * Create a new MutableQuantile(A more accurate histogram).
-   * @param name The name of the histogram
-   * @return a new MutableQuantile
-   */
-  public MetricMutableQuantiles newQuantile(String name) {
-    return newQuantile(name, "");
-  }
-
-  public MetricMutableQuantiles newQuantile(String name, String desc) {
-    MetricMutableQuantiles histo = new MetricMutableQuantiles(name, desc, "Ops", "", 60);
-    return addNewMetricIfAbsent(name, histo, MetricMutableQuantiles.class);
-  }

   synchronized void add(String name, MutableMetric metric) {
     addNewMetricIfAbsent(name, metric, MutableMetric.class);
@@ -552,27 +539,6 @@ public class DynamicMetricsRegistry {
     return (MutableHistogram) histo;
   }

-  public MetricMutableQuantiles getQuantile(String histoName) {
-    //See getLongGauge for description on how this works.
-    MutableMetric histo = metricsMap.get(histoName);
-    if (histo == null) {
-      MetricMutableQuantiles newCounter =
-          new MetricMutableQuantiles(histoName, "", "Ops", "", 60);
-      histo = metricsMap.putIfAbsent(histoName, newCounter);
-      if (histo == null) {
-        return newCounter;
-      }
-    }
-
-
-    if (!(histo instanceof MetricMutableQuantiles)) {
-      throw new MetricsException("Metric already exists in registry for metric name: " +
-          histoName + " and not of type MutableHistogram");
-    }
-
-    return (MetricMutableQuantiles) histo;
-  }
-
   private <T extends MutableMetric> T addNewMetricIfAbsent(String name, T ret,
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
deleted file mode 100644
index c03654b..0000000
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics2.lib;
-
-import static org.apache.hadoop.metrics2.lib.Interns.info;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricHistogram;
-import org.apache.hadoop.metrics2.MetricsExecutor;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.util.MetricQuantile;
-import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Watches a stream of long values, maintaining online estimates of specific quantiles with provably
- * low error bounds. This is particularly useful for accurate high-percentile (e.g. 95th, 99th)
- * latency metrics.
- */
-@InterfaceAudience.Private
-public class MetricMutableQuantiles extends MutableMetric implements MetricHistogram {
-
-  static final MetricQuantile[] quantiles = {new MetricQuantile(0.50, 0.050),
-      new MetricQuantile(0.75, 0.025), new MetricQuantile(0.90, 0.010),
-      new MetricQuantile(0.95, 0.005), new MetricQuantile(0.99, 0.001)};
-
-  private final MetricsInfo numInfo;
-  private final MetricsInfo[] quantileInfos;
-  private final int interval;
-
-  private MetricSampleQuantiles estimator;
-  private long previousCount = 0;
-  private MetricsExecutor executor;
-
-
-  @VisibleForTesting
-  protected Map<MetricQuantile, Long> previousSnapshot = null;
-
-  /**
-   * Instantiates a new {@link MetricMutableQuantiles} for a metric that rolls itself over on the
-   * specified time interval.
-   *
-   * @param name of the metric
-   * @param description long-form textual description of the metric
-   * @param sampleName type of items in the stream (e.g., "Ops")
-   * @param valueName type of the values
-   * @param interval rollover interval (in seconds) of the estimator
-   */
-  public MetricMutableQuantiles(String name, String description, String sampleName,
-      String valueName, int interval) {
-    String ucName = StringUtils.capitalize(name);
-    String usName = StringUtils.capitalize(sampleName);
-    String uvName = StringUtils.capitalize(valueName);
-    String desc = StringUtils.uncapitalize(description);
-    String lsName = StringUtils.uncapitalize(sampleName);
-    String lvName = StringUtils.uncapitalize(valueName);
-
-    numInfo = info(ucName + "Num" + usName, String.format(
-        "Number of %s for %s with %ds interval", lsName, desc, interval));
-    // Construct the MetricsInfos for the quantiles, converting to percentiles
-    quantileInfos = new MetricsInfo[quantiles.length];
-    String nameTemplate = "%s%dthPercentile%dsInterval%s";
-    String descTemplate = "%d percentile %s with %d second interval for %s";
-    for (int i = 0; i < quantiles.length; i++) {
-      int percentile = (int) (100 * quantiles[i].quantile);
-      quantileInfos[i] = info(String.format(nameTemplate, ucName, percentile, interval, uvName),
-          String.format(descTemplate, percentile, lvName, interval, desc));
-    }
-
-    estimator = new MetricSampleQuantiles(quantiles);
-    executor = new MetricsExecutorImpl();
-    this.interval = interval;
-    executor.getExecutor().scheduleAtFixedRate(new RolloverSample(this),
-        interval,
-        interval,
-        TimeUnit.SECONDS);
-  }
-
-  @Override
-  public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
-    if (all || changed()) {
-      builder.addGauge(numInfo, previousCount);
-      for (int i = 0; i < quantiles.length; i++) {
-        long newValue = 0;
-        // If snapshot is null, we failed to update since the window was empty
-        if (previousSnapshot != null) {
-          newValue = previousSnapshot.get(quantiles[i]);
-        }
-        builder.addGauge(quantileInfos[i], newValue);
-      }
-      if (changed()) {
-        clearChanged();
-      }
-    }
-  }
-
-  public synchronized void add(long value) {
-    estimator.insert(value);
-  }
-
-  public int getInterval() {
-    return interval;
-  }
-
-  /** Runnable used to periodically roll over the internal {@link org.apache.hadoop.metrics2.util.MetricSampleQuantiles} every interval. */
-  private static class RolloverSample implements Runnable {
-
-    MetricMutableQuantiles parent;
-
-    public RolloverSample(MetricMutableQuantiles parent) {
-      this.parent = parent;
-    }
-
-    @Override
-    public void run() {
-      synchronized (parent) {
-        try {
-          parent.previousCount = parent.estimator.getCount();
-          parent.previousSnapshot = parent.estimator.snapshot();
-        } catch (IOException e) {
-          // Couldn't get a new snapshot because the window was empty
-          parent.previousCount = 0;
-          parent.previousSnapshot = null;
-        }
-        parent.estimator.clear();
-      }
-      parent.setChanged();
-    }
-
-  }
-}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
index f70413e..c381609 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.metrics2.MetricsExecutor;

 /**
  * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by
- * MetricMutableQuantiles{@link MetricMutableQuantiles}, MetricsRegionAggregateSourceImpl, and
+ * MetricsRegionAggregateSourceImpl, and
  * JmxCacheBuster
  */
 @InterfaceAudience.Private
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
index c7ff940..81a6987 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
@@ -18,36 +18,24 @@
 package org.apache.hadoop.metrics2.lib;

-import java.util.concurrent.atomic.AtomicLong;
-
+import org.HdrHistogram.Histogram;
+import org.HdrHistogram.Recorder;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;

-import com.codahale.metrics.ExponentiallyDecayingReservoir;
-import com.codahale.metrics.Reservoir;
-import com.codahale.metrics.Snapshot;
-
 /**
  * A histogram implementation that runs in constant space, and exports to hadoop2's metrics2 system.
  */
 @InterfaceAudience.Private
 public class MutableHistogram extends MutableMetric implements MetricHistogram {
-
-  private static final int DEFAULT_SAMPLE_SIZE = 2046;
-  // the bias towards sampling from more recent data.
-  // Per Cormode et al. an alpha of 0.015 strongly biases to the last 5 minutes
-  private static final double DEFAULT_ALPHA = 0.015;
-
+  protected final Recorder recorder;
   protected final String name;
   protected final String desc;
-  private final Reservoir reservoir;
-  private final AtomicLong min;
-  private final AtomicLong max;
-  private final AtomicLong sum;
-  private final AtomicLong count;
+  protected final Counter count = new Counter();

   public MutableHistogram(MetricsInfo info) {
     this(info.name(), info.description());
@@ -56,86 +44,40 @@ public class MutableHistogram extends MutableMetric implements MetricHistogram {
   public MutableHistogram(String name, String description) {
     this.name = StringUtils.capitalize(name);
     this.desc = StringUtils.uncapitalize(description);
-    reservoir = new ExponentiallyDecayingReservoir(DEFAULT_SAMPLE_SIZE, DEFAULT_ALPHA);
-    count = new AtomicLong();
-    min = new AtomicLong(Long.MAX_VALUE);
-    max = new AtomicLong(Long.MIN_VALUE);
-    sum = new AtomicLong();
+    this.recorder = new Recorder(5);
   }

   public void add(final long val) {
-    setChanged();
-    count.incrementAndGet();
-    reservoir.update(val);
-    setMax(val);
-    setMin(val);
-    sum.getAndAdd(val);
-  }
-
-  private void setMax(final long potentialMax) {
-    boolean done = false;
-    while (!done) {
-      final long currentMax = max.get();
-      done = currentMax >= potentialMax
-          || max.compareAndSet(currentMax, potentialMax);
-    }
-  }
-
-  private void setMin(long potentialMin) {
-    boolean done = false;
-    while (!done) {
-      final long currentMin = min.get();
-      done = currentMin <= potentialMin
-          || min.compareAndSet(currentMin, potentialMin);
-    }
-  }
-
-  public long getMax() {
-    if (count.get() > 0) {
-      return max.get();
-    }
-    return 0L;
-  }
-
-  public long getMin() {
-    if (count.get() > 0) {
-      return min.get();
-    }
-    return 0L;
-  }
-
-  public double getMean() {
-    long cCount = count.get();
-    if (cCount > 0) {
-      return sum.get() / (double) cCount;
-    }
-    return 0.0;
+    count.increment();
+    recorder.recordValue(val);
   }

   @Override
   public void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) {
-    if (all || changed()) {
-      clearChanged();
-      updateSnapshotMetrics(metricsRecordBuilder);
-    }
+    Histogram histo = recorder.getIntervalHistogram();
+    updateSnapshotMetrics(metricsRecordBuilder, histo);
   }

-  public void updateSnapshotMetrics(MetricsRecordBuilder metricsRecordBuilder) {
-    final Snapshot s = reservoir.getSnapshot();
-    metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), count.get());
+  protected void updateSnapshotMetrics(MetricsRecordBuilder metricsRecordBuilder, Histogram histo) {
+    metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), count.get());
+    metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), histo.getMinValue());
+    metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), histo.getMaxValue());
+    metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), histo.getMean());

-    metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), getMin());
-    metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), getMax());
-    metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), getMean());
-
-    metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc), s.getMedian());
-    metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
-        s.get75thPercentile());
-    metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc),
-        s.getValue(0.90));
-    metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc),
-        s.get95thPercentile());
-    metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc),
-        s.get99thPercentile());
+    metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc),
+        histo.getValueAtPercentile(50));
+    metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
+        histo.getValueAtPercentile(75));
+    metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc),
+        histo.getValueAtPercentile(90));
+    metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc),
+        histo.getValueAtPercentile(95));
+    metricsRecordBuilder.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc),
+        histo.getValueAtPercentile(98));
+    metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc),
+        histo.getValueAtPercentile(99));
+    metricsRecordBuilder.addGauge(Interns.info(
+        name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc),
+        histo.getValueAtPercentile(99.9));
   }
 }
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
index ac1f497..4f2c881 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.metrics2.lib;

 import java.util.concurrent.atomic.AtomicLongArray;

+import org.HdrHistogram.Histogram;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricHistogram;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;

@@ -28,14 +30,14 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
  * Extended histogram implementation with metric range counters.
  */
 @InterfaceAudience.Private
-public abstract class MutableRangeHistogram extends MutableHistogram {
+public abstract class MutableRangeHistogram extends MutableHistogram implements MetricHistogram {

   public MutableRangeHistogram(MetricsInfo info) {
     this(info.name(), info.description());
   }

   public MutableRangeHistogram(String name, String description) {
-    super(name, description);
+    super(name, description);
   }

   /**
@@ -61,19 +63,19 @@ public abstract class MutableRangeHistogram extends MutableHistogram {
   private void updateBand(final long val) {
     int i;
-    for (i=0; i<getRange().length && val > getRange()[i]; i++);
-    getRangeVals().incrementAndGet(i);
+    long[] ranges = getRange();
+    for (i=0; i< ranges.length && val > ranges[i]; i++) {
+      getRangeVals().incrementAndGet(i);
+    }
   }

   @Override
   public void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) {
-    if (all || changed()) {
-      clearChanged();
-      updateSnapshotMetrics(metricsRecordBuilder);
-      updateSnapshotRangeMetrics(metricsRecordBuilder);
-    }
+    Histogram histo = recorder.getIntervalHistogram();
+    updateSnapshotMetrics(metricsRecordBuilder, histo);
+    updateSnapshotRangeMetrics(metricsRecordBuilder);
   }
-
+
   public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder) {
     long prior = 0;
     for (int i = 0; i < getRange().length; i++) {
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index d5f1e30..ddf5987 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -439,6 +439,10 @@
       <artifactId>metrics-core</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.hdrhistogram</groupId>
+      <artifactId>HdrHistogram</artifactId>
+    </dependency>
+    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
index c23cf75..fa55f6a 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
@@ -37,7 +37,6 @@ org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket;
 org.apache.hadoop.util.StringUtils;
-com.codahale.metrics.Snapshot;
 <%java>
   BlockCache bc = cacheConfig == null ? null : cacheConfig.getBlockCache();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
index 79acec0..774d304 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
@@ -17,56 +17,56 @@
  */
 package org.apache.hadoop.hbase.io.hfile;

+import org.HdrHistogram.Histogram;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;

-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.Snapshot;
-
 /**
  * Snapshot of block cache age in cache.
  * This object is preferred because we can control how it is serialized out when JSON'ing.
  */
 @JsonIgnoreProperties({"ageHistogram", "snapshot"})
 public class AgeSnapshot {
-  private final Snapshot snapshot;
+
+  private final Histogram ageHistogram;

   AgeSnapshot(final Histogram ageHistogram) {
-    this.snapshot = ageHistogram.getSnapshot();
+    this.ageHistogram = ageHistogram;
   }

   public double get75thPercentile() {
-    return snapshot.get75thPercentile();
+    return ageHistogram.getValueAtPercentile(75);
   }

   public double get95thPercentile() {
-    return snapshot.get95thPercentile();
+    return ageHistogram.getValueAtPercentile(95);
   }

   public double get98thPercentile() {
-    return snapshot.get98thPercentile();
-  }
-
-  public double get999thPercentile() {
-    return snapshot.get999thPercentile();
+    return ageHistogram.getValueAtPercentile(98);
   }

   public double get99thPercentile() {
-    return snapshot.get99thPercentile();
+    return ageHistogram.getValueAtPercentile(99);
   }

+  public double get999thPercentile() {
+    return ageHistogram.getValueAtPercentile(99.9);
+  }
+
+
   public double getMean() {
-    return this.snapshot.getMean();
+    return this.ageHistogram.getMean();
   }

   public double getMax() {
-    return snapshot.getMax();
+    return this.ageHistogram.getMaxValue();
   }

   public double getMin() {
-    return snapshot.getMin();
+    return this.ageHistogram.getMinValue();
   }

   public double getStdDev() {
-    return snapshot.getStdDev();
+    return this.ageHistogram.getStdDeviation();
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index d81871f..ecd0e94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -23,6 +23,8 @@ import java.util.NavigableSet;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ConcurrentSkipListSet;

+import org.HdrHistogram.Histogram;
+import org.HdrHistogram.Recorder;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.codehaus.jackson.JsonGenerationException;
@@ -31,11 +33,6 @@ import org.codehaus.jackson.map.JsonMappingException;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.SerializationConfig;

-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Snapshot;
-
-import static com.codahale.metrics.MetricRegistry.name;

 /**
  * Utilty for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
@@ -44,11 +41,6 @@ import static com.codahale.metrics.MetricRegistry.name;
 @InterfaceAudience.Private
 public class BlockCacheUtil {
   /**
-   * Needed making histograms.
-   */
-  private static final MetricRegistry METRICS = new MetricRegistry();
-
-  /**
    * Needed generating JSON.
    */
   private static final ObjectMapper MAPPER = new ObjectMapper();
@@ -206,7 +198,7 @@ public class BlockCacheUtil {
    */
   private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
     new ConcurrentSkipListMap<String, NavigableSet<CachedBlock>>();
-  Histogram age = METRICS.histogram(name(CachedBlocksByFile.class, "age"));
+  Recorder age = new Recorder(5);

   /**
    * @param cb
@@ -228,7 +220,7 @@ public class BlockCacheUtil {
       this.dataSize += cb.getSize();
     }
     long age = this.now - cb.getCachedTime();
-    this.age.update(age);
+    this.age.recordValue(age);
     return false;
   }
@@ -271,18 +263,18 @@ public class BlockCacheUtil {
   }

   public AgeSnapshot getAgeInCacheSnapshot() {
-    return new AgeSnapshot(this.age);
+    return new AgeSnapshot(this.age.getIntervalHistogram());
   }

   @Override
   public String toString() {
-    Snapshot snapshot = age.getSnapshot();
+    Histogram snapshot = age.getIntervalHistogram();
     return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size +
       ", dataSize=" + getDataSize() +
-      ", mean age=" + snapshot.getMean() + ", stddev age=" + snapshot.getStdDev() +
-      ", min age=" + snapshot.getMin() + ", max age=" + snapshot.getMax() +
-      ", 95th percentile age=" + snapshot.get95thPercentile() +
-      ", 99th percentile age=" + snapshot.get99thPercentile();
+      ", mean age=" + snapshot.getMean() + ", stddev age=" + snapshot.getStdDeviation() +
+      ", min age=" + snapshot.getMinValue() + ", max age=" + snapshot.getMaxValue() +
+      ", 95th percentile age=" + snapshot.getValueAtPercentile(95) +
+      ", 99th percentile age=" + snapshot.getValueAtPercentile(99);
   }
  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index 50e8bbb..673fc1a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -18,24 +18,20 @@
  */
 package org.apache.hadoop.hbase.io.hfile;

-import java.util.concurrent.atomic.AtomicLong;
-
+import org.HdrHistogram.Recorder;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;

-import com.codahale.metrics.Histogram;
-import com.codahale.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.util.Counter;
+
+import java.util.concurrent.atomic.AtomicLong;
+

-import static com.codahale.metrics.MetricRegistry.name;

 /**
  * Class that implements cache metrics.
  */
 @InterfaceAudience.Private
 public class CacheStats {
-  /**
-   * Needed making histograms.
-   */
-  private static final MetricRegistry METRICS = new MetricRegistry();

   /** Sliding window statistics. The number of metric periods to include in
    * sliding window hit ratio calculations.
@@ -43,10 +39,10 @@ public class CacheStats {
   static final int DEFAULT_WINDOW_PERIODS = 5;

   /** The number of getBlock requests that were cache hits */
-  private final AtomicLong hitCount = new AtomicLong(0);
+  private final Counter hitCount = new Counter();

   /** The number of getBlock requests that were cache hits from primary replica */
-  private final AtomicLong primaryHitCount = new AtomicLong(0);
+  private final Counter primaryHitCount = new Counter();

   /**
    * The number of getBlock requests that were cache hits, but only from
@@ -54,27 +50,27 @@ public class CacheStats {
    * attempt to read from the block cache even if they will not put new blocks
    * into the block cache. See HBASE-2253 for more information.
    */
-  private final AtomicLong hitCachingCount = new AtomicLong(0);
+  private final Counter hitCachingCount = new Counter();

   /** The number of getBlock requests that were cache misses */
-  private final AtomicLong missCount = new AtomicLong(0);
+  private final Counter missCount = new Counter();

   /** The number of getBlock requests for primary replica that were cache misses */
-  private final AtomicLong primaryMissCount = new AtomicLong(0);
+  private final Counter primaryMissCount = new Counter();

   /**
    * The number of getBlock requests that were cache misses, but only from
    * requests that were set to use the block cache.
    */
-  private final AtomicLong missCachingCount = new AtomicLong(0);
+  private final Counter missCachingCount = new Counter();

   /** The number of times an eviction has occurred */
-  private final AtomicLong evictionCount = new AtomicLong(0);
+  private final Counter evictionCount = new Counter();

   /** The total number of blocks that have been evicted */
-  private final AtomicLong evictedBlockCount = new AtomicLong(0);
+  private final Counter evictedBlockCount = new Counter();

   /** The total number of blocks for primary replica that have been evicted */
-  private final AtomicLong primaryEvictedBlockCount = new AtomicLong(0);
+  private final Counter primaryEvictedBlockCount = new Counter();

   /** The total number of blocks that were not inserted. */
   private final AtomicLong failedInserts = new AtomicLong(0);
@@ -102,7 +98,7 @@ public class CacheStats {
   /**
    * Keep running age at eviction time
    */
-  private Histogram ageAtEviction;
+  private Recorder ageAtEviction;
   private long startTime = System.nanoTime();

   public CacheStats(final String name) {
@@ -115,7 +111,7 @@ public class CacheStats {
     this.hitCachingCounts = initializeZeros(numPeriodsInWindow);
     this.requestCounts = initializeZeros(numPeriodsInWindow);
     this.requestCachingCounts = initializeZeros(numPeriodsInWindow);
-    this.ageAtEviction = METRICS.histogram(name(CacheStats.class, name + ".ageAtEviction"));
+    this.ageAtEviction = new Recorder(5);
   }

   @Override
@@ -132,9 +128,9 @@ public class CacheStats {
   }

   public void miss(boolean caching, boolean primary) {
-    missCount.incrementAndGet();
-    if (primary) primaryMissCount.incrementAndGet();
-    if (caching) missCachingCount.incrementAndGet();
+    missCount.increment();
+    if (primary) primaryMissCount.increment();
+    if (caching) missCachingCount.increment();
   }

   public void hit(boolean caching) {
@@ -142,20 +138,20 @@ public class CacheStats {
   }

   public void hit(boolean caching, boolean primary) {
-    hitCount.incrementAndGet();
-    if (primary) primaryHitCount.incrementAndGet();
-    if (caching) hitCachingCount.incrementAndGet();
+    hitCount.increment();
+    if (primary) primaryHitCount.increment();
+    if (caching) hitCachingCount.increment();
   }

   public void evict() {
-    evictionCount.incrementAndGet();
+    evictionCount.increment();
   }

   public void evicted(final long t, boolean primary) {
-    if (t > this.startTime) this.ageAtEviction.update(t - this.startTime);
-    this.evictedBlockCount.incrementAndGet();
+    if (t > this.startTime) this.ageAtEviction.recordValue(t - this.startTime);
+    this.evictedBlockCount.increment();
     if (primary) {
-      primaryEvictedBlockCount.incrementAndGet();
+      primaryEvictedBlockCount.increment();
     }
   }
@@ -274,7 +270,7 @@ public class CacheStats {
   }

   public AgeSnapshot getAgeAtEvictionSnapshot() {
-    return new AgeSnapshot(this.ageAtEviction);
+    return new AgeSnapshot(this.ageAtEviction.getIntervalHistogram());
   }

   private static long sum(long [] counts) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 1e1835f..50d2044 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HFileProtos;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.Writable;

@@ -179,17 +180,19 @@ public class HFile {
    */
   public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;
   // For measuring number of checksum failures
-  static final AtomicLong checksumFailures = new AtomicLong();
+  static final Counter checksumFailures = new Counter();

   // for test purpose
-  public static final AtomicLong dataBlockReadCnt = new AtomicLong(0);
+  public static final Counter dataBlockReadCnt = new Counter();

   /**
    * Number of checksum verification failures. It also
    * clears the counter.
    */
   public static final long getChecksumFailuresCount() {
-    return checksumFailures.getAndSet(0);
+    long count = checksumFailures.get();
+    checksumFailures.set(0);
+    return count;
   }

   /** API required to write an {@link HFile} */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index e7a1e5e..e2f524c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1522,7 +1522,7 @@ public class HFileBlock implements Cacheable {
         HFile.LOG.warn(msg);
         throw new IOException(msg); // cannot happen case here
       }
-      HFile.checksumFailures.incrementAndGet(); // update metrics
+      HFile.checksumFailures.increment(); // update metrics

       // If we have a checksum failure, we fall back into a mode where
       // the next few reads use HDFS level checksums. We aim to make the
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index b2f5ded..239c63d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1497,7 +1497,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
         assert cachedBlock.isUnpacked() : "Packed block leak.";
         if (cachedBlock.getBlockType().isData()) {
           if (updateCacheMetrics) {
-            HFile.dataBlockReadCnt.incrementAndGet();
+            HFile.dataBlockReadCnt.increment();
           }
           // Validate encoding type for data blocks. We include encoding
           // type in the cache key, and we expect it to match on a cache hit.
@@ -1537,7 +1537,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       }

       if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
-        HFile.dataBlockReadCnt.incrementAndGet();
+        HFile.dataBlockReadCnt.increment();
       }

       return unpacked;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
index 51e6268..9b10957 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
@@ -22,6 +22,7 @@ import java.util.concurrent.atomic.AtomicLong;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
+import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

 /**
@@ -29,8 +30,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  */
 @InterfaceAudience.Private
 public class BucketCacheStats extends CacheStats {
-  private final AtomicLong ioHitCount = new AtomicLong(0);
-  private final AtomicLong ioHitTime = new AtomicLong(0);
+  private final Counter ioHitCount = new Counter(0);
+  private final Counter ioHitTime = new Counter(0);
   private final static int nanoTime = 1000000;
   private long lastLogTime = EnvironmentEdgeManager.currentTime();

@@ -45,8 +46,8 @@ public class BucketCacheStats extends CacheStats {
   }

   public void ioHit(long time) {
-    ioHitCount.incrementAndGet();
-    ioHitTime.addAndGet(time);
+    ioHitCount.increment();
+    ioHitTime.add(time);
   }

   public long getIOHitsPerSecond() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index d752e17..aa7ce0f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
+import org.apache.hadoop.hbase.util.Counter;

 /**
  * KeyValueScanner adaptor over the Reader. It also provides hooks into
@@ -60,7 +61,7 @@ public class StoreFileScanner implements KeyValueScanner {
   // if have encountered the next row. Only used for reversed scan
   private boolean stopSkippingKVsIfNextRow = false;

-  private static AtomicLong seekCount;
+  private static Counter seekCount;

   private ScanQueryMatcher matcher;

@@ -164,7 +165,7 @@ public class StoreFileScanner implements KeyValueScanner {
   }

   public boolean seek(Cell key) throws IOException {
-    if (seekCount != null) seekCount.incrementAndGet();
+    if (seekCount != null) seekCount.increment();

     try {
       try {
@@ -191,7 +192,7 @@ public class StoreFileScanner implements KeyValueScanner {
   }

   public boolean reseek(Cell key) throws IOException {
-    if (seekCount != null) seekCount.incrementAndGet();
+    if (seekCount != null) seekCount.increment();

     try {
       try {
@@ -424,7 +425,7 @@ public class StoreFileScanner implements KeyValueScanner {
     return seekCount.get();
   }
   static final void instrument() {
-    seekCount = new AtomicLong();
+    seekCount = new Counter();
   }

   @Override
@@ -447,7 +448,7 @@ public class StoreFileScanner implements KeyValueScanner {
     Cell key = originalKey;
     do {
       Cell seekKey = CellUtil.createFirstOnRow(key);
-      if (seekCount != null) seekCount.incrementAndGet();
+      if (seekCount != null) seekCount.increment();
       if (!hfs.seekBefore(seekKey)) {
         this.cur = null;
         return false;
@@ -455,7 +456,7 @@ public class StoreFileScanner implements KeyValueScanner {
       Cell curCell = hfs.getCell();
       Cell firstKeyOfPreviousRow = CellUtil.createFirstOnRow(curCell);

-      if (seekCount != null) seekCount.incrementAndGet();
+      if (seekCount != null) seekCount.increment();
       if (!seekAtOrAfter(hfs, firstKeyOfPreviousRow)) {
         this.cur = null;
         return false;
diff --git a/pom.xml b/pom.xml
index af49452..57f4e5a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1188,6 +1188,7 @@
     3.2.2
     3.1
     3.1.2
+    <hdr-histogram.version>2.1.8</hdr-histogram.version>
     12.0.1
     1.3.9
     1.9.13
@@ -1447,6 +1448,11 @@
         <version>${metrics-core.version}</version>
       </dependency>
      <dependency>
+        <groupId>org.hdrhistogram</groupId>
+        <artifactId>HdrHistogram</artifactId>
+        <version>${hdr-histogram.version}</version>
+      </dependency>
+      <dependency>
         <groupId>com.google.guava</groupId>
         <artifactId>guava</artifactId>
         <version>${guava.version}</version>
-- 
2.7.0
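
Note: the pattern the patch applies throughout is to record values into an org.HdrHistogram.Recorder on the hot path, read percentiles from an interval Histogram only at snapshot time, and keep counts in HBase's striped Counter instead of an AtomicLong. The following is a minimal standalone sketch of that pattern for reviewers who want to try the new HdrHistogram 2.1.8 dependency in isolation; the class LatencyTracker and its method names are illustrative only and are not part of the patch.

import org.HdrHistogram.Histogram;
import org.HdrHistogram.Recorder;

public class LatencyTracker {
  // 5 significant value digits, matching "new Recorder(5)" used in MutableHistogram.
  private final Recorder recorder = new Recorder(5);

  public void update(long latencyNanos) {
    // Low-contention write on the hot path; no locks, no CAS loops on min/max/sum.
    recorder.recordValue(latencyNanos);
  }

  public void report() {
    // Swaps out the active histogram; contains values recorded since the last call.
    Histogram interval = recorder.getIntervalHistogram();
    System.out.println("p99=" + interval.getValueAtPercentile(99)
        + " p99.9=" + interval.getValueAtPercentile(99.9)
        + " max=" + interval.getMaxValue());
  }

  public static void main(String[] args) {
    LatencyTracker tracker = new LatencyTracker();
    for (long i = 1; i <= 1000; i++) {
      tracker.update(i);
    }
    tracker.report();
  }
}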