diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index f74c4f7..81883f2 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -109,6 +109,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; +import org.apache.hadoop.hbase.regionserver.metrics.MetricsStorage; import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; @@ -336,86 +337,6 @@ public class HRegion implements HeapSize { // , Writable{ private HTableDescriptor htableDescriptor = null; private RegionSplitPolicy splitPolicy; - // for simple numeric metrics (# of blocks read from block cache) - public static final ConcurrentMap<String, AtomicLong> numericMetrics = new ConcurrentHashMap<String, AtomicLong>(); - - public static final String METRIC_GETSIZE = "getsize"; - public static final String METRIC_NEXTSIZE = "nextsize"; - - // for simple numeric metrics (current block cache size) - // These ones are not reset to zero when queried, unlike the previous. - public static final ConcurrentMap<String, AtomicLong> numericPersistentMetrics = new ConcurrentHashMap<String, AtomicLong>(); - - /** - * Used for metrics where we want track a metrics (such as latency) over a - * number of operations.
- */ - public static final ConcurrentMap<String, Pair<AtomicLong, AtomicInteger>> - timeVaryingMetrics = new ConcurrentHashMap<String, Pair<AtomicLong, AtomicInteger>>(); - - public static void incrNumericMetric(String key, long amount) { - AtomicLong oldVal = numericMetrics.get(key); - if (oldVal == null) { - oldVal = numericMetrics.putIfAbsent(key, new AtomicLong(amount)); - if (oldVal == null) - return; - } - oldVal.addAndGet(amount); - } - - public static void setNumericMetric(String key, long amount) { - numericMetrics.put(key, new AtomicLong(amount)); - } - - public static void incrTimeVaryingMetric(String key, long amount) { - Pair<AtomicLong, AtomicInteger> oldVal = timeVaryingMetrics.get(key); - if (oldVal == null) { - oldVal = timeVaryingMetrics.putIfAbsent(key, - new Pair<AtomicLong, AtomicInteger>(new AtomicLong(amount), - new AtomicInteger(1))); - if (oldVal == null) - return; - } - oldVal.getFirst().addAndGet(amount); // total time - oldVal.getSecond().incrementAndGet(); // increment ops by 1 - } - - public static void incrNumericPersistentMetric(String key, long amount) { - AtomicLong oldVal = numericPersistentMetrics.get(key); - if (oldVal == null) { - oldVal = numericPersistentMetrics - .putIfAbsent(key, new AtomicLong(amount)); - if (oldVal == null) - return; - } - oldVal.addAndGet(amount); - } - - public static long getNumericMetric(String key) { - AtomicLong m = numericMetrics.get(key); - if (m == null) - return 0; - return m.get(); - } - - public static Pair<Long, Integer> getTimeVaryingMetric(String key) { - Pair<AtomicLong, AtomicInteger> pair = timeVaryingMetrics.get(key); - if (pair == null) { - return new Pair<Long, Integer>(0L, 0); - } - - return new Pair<Long, Integer>(pair.getFirst().get(), - pair.getSecond().get()); - } - - static long getNumericPersistentMetric(String key) { - AtomicLong m = numericPersistentMetrics.get(key); - if (m == null) - return 0; - return m.get(); - } - /** * Should only be used for testing purposes */ @@ -1891,7 -1812,7 @@ public class HRegion implements HeapSize { // , Writable{ final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix( getTableDesc().getNameAsString(), familyMap.keySet()); if
(!metricPrefix.isEmpty()) { - HRegion.incrTimeVaryingMetric(metricPrefix + "delete_", after - now); + MetricsStorage.incrTimeVaryingMetric(metricPrefix + "delete_", after - now); } if (flush) { @@ -2282,7 +2203,7 @@ public class HRegion implements HeapSize { // , Writable{ if (metricPrefix == null) { metricPrefix = SchemaMetrics.CF_BAD_FAMILY_PREFIX; } - HRegion.incrTimeVaryingMetric(metricPrefix + "multiput_", + MetricsStorage.incrTimeVaryingMetric(metricPrefix + "multiput_", endTimeMs - startTimeMs); if (!success) { @@ -2541,7 +2462,7 @@ public class HRegion implements HeapSize { // , Writable{ final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix( this.getTableDesc().getNameAsString(), familyMap.keySet()); if (!metricPrefix.isEmpty()) { - HRegion.incrTimeVaryingMetric(metricPrefix + "put_", after - now); + MetricsStorage.incrTimeVaryingMetric(metricPrefix + "put_", after - now); } if (flush) { @@ -4156,7 +4077,7 @@ public class HRegion implements HeapSize { // , Writable{ RegionScanner scanner = null; try { scanner = getScanner(scan); - scanner.next(results, HRegion.METRIC_GETSIZE); + scanner.next(results, SchemaMetrics.METRIC_GETSIZE); } finally { if (scanner != null) scanner.close(); @@ -4172,7 +4093,7 @@ public class HRegion implements HeapSize { // , Writable{ final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix( this.getTableDesc().getNameAsString(), get.familySet()); if (!metricPrefix.isEmpty()) { - HRegion.incrTimeVaryingMetric(metricPrefix + "get_", after - now); + MetricsStorage.incrTimeVaryingMetric(metricPrefix + "get_", after - now); } return results; @@ -4246,14 +4167,14 @@ public class HRegion implements HeapSize { // , Writable{ processor.postProcess(this, walEdit); } catch (IOException e) { long endNanoTime = System.nanoTime(); - HRegion.incrTimeVaryingMetric(metricsName + ".error.nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano", endNanoTime - startNanoTime); throw e; } finally { 
closeRegionOperation(); } final long endNanoTime = System.nanoTime(); - HRegion.incrTimeVaryingMetric(metricsName + ".nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".nano", endNanoTime - startNanoTime); return; } @@ -4363,7 +4284,7 @@ public class HRegion implements HeapSize { // , Writable{ } catch (IOException e) { long endNanoTime = System.nanoTime(); - HRegion.incrTimeVaryingMetric(metricsName + ".error.nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano", endNanoTime - startNanoTime); throw e; } finally { @@ -4375,19 +4296,19 @@ public class HRegion implements HeapSize { // , Writable{ } // Populate all metrics long endNanoTime = System.nanoTime(); - HRegion.incrTimeVaryingMetric(metricsName + ".nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".nano", endNanoTime - startNanoTime); - HRegion.incrTimeVaryingMetric(metricsName + ".acquirelock.nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".acquirelock.nano", lockedNanoTime - startNanoTime); - HRegion.incrTimeVaryingMetric(metricsName + ".process.nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".process.nano", processDoneNanoTime - lockedNanoTime); - HRegion.incrTimeVaryingMetric(metricsName + ".occupylock.nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".occupylock.nano", unlockedNanoTime - lockedNanoTime); - HRegion.incrTimeVaryingMetric(metricsName + ".sync.nano", + MetricsStorage.incrTimeVaryingMetric(metricsName + ".sync.nano", endNanoTime - unlockedNanoTime); } @@ -4784,7 +4705,7 @@ public class HRegion implements HeapSize { // , Writable{ long after = EnvironmentEdgeManager.currentTimeMillis(); String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix( getTableDesc().getName(), family); - HRegion.incrTimeVaryingMetric(metricPrefix + "increment_", after - before); + MetricsStorage.incrTimeVaryingMetric(metricPrefix + "increment_", after - before); if (flush) { // Request a cache flush. Do it outside update lock. 
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 1c0541f..b7ceb1e 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -125,6 +125,7 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler; +import org.apache.hadoop.hbase.regionserver.metrics.MetricsStorage; import org.apache.hadoop.hbase.regionserver.metrics.RegionServerDynamicMetrics; import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; @@ -1256,7 +1257,7 @@ public class HRegionServer extends RegionServer } for (Entry<String, MutableDouble> e : tempVals.entrySet()) { - HRegion.setNumericMetric(e.getKey(), e.getValue().longValue()); + MetricsStorage.setNumericMetric(e.getKey(), e.getValue().longValue()); } this.metrics.stores.set(stores); @@ -2245,7 +2246,7 @@ && currentScanResultSize < maxScannerResultSize; i++) { requestCount.incrementAndGet(); // Collect values to be returned here - boolean moreRows = s.next(values, HRegion.METRIC_NEXTSIZE); + boolean moreRows = s.next(values, SchemaMetrics.METRIC_NEXTSIZE); if (!values.isEmpty()) { for (KeyValue kv : values) { currentScanResultSize += kv.heapSize(); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java index 9487a1c..7c59995 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java @@ -82,6 +82,7 @@ import
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.regionserver.HRegionServer.QosPriority; import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException; +import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; @@ -681,7 +682,7 @@ public abstract class RegionServer implements for (int i = 0; i < rows && currentScanResultSize < maxScannerResultSize; i++) { // Collect values to be returned here - boolean moreRows = scanner.next(values, HRegion.METRIC_NEXTSIZE); + boolean moreRows = scanner.next(values, SchemaMetrics.METRIC_NEXTSIZE); if (!values.isEmpty()) { for (KeyValue kv : values) { currentScanResultSize += kv.heapSize(); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 919d814..eeb4543 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.regionserver.metrics.MetricsStorage; import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -373,7 +374,7 @@ class StoreScanner extends NonLazyKeyValueScanner results.add(kv); if (metric != null) { - HRegion.incrNumericMetric(this.metricNamePrefix + metric, + MetricsStorage.incrNumericMetric(this.metricNamePrefix + metric, kv.getLength()); } diff --git 
src/main/java/org/apache/hadoop/hbase/regionserver/metrics/MetricsStorage.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/MetricsStorage.java new file mode 100644 index 0000000..0584581 --- /dev/null +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/MetricsStorage.java @@ -0,0 +1,85 @@ +package org.apache.hadoop.hbase.regionserver.metrics; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.hbase.util.Pair; + +public class MetricsStorage { + + // for simple numeric metrics (# of blocks read from block cache) + public static final ConcurrentMap<String, AtomicLong> numericMetrics = new ConcurrentHashMap<String, AtomicLong>(); + + // for simple numeric metrics (current block cache size) + // These ones are not reset to zero when queried, unlike the previous. + public static final ConcurrentMap<String, AtomicLong> numericPersistentMetrics = new ConcurrentHashMap<String, AtomicLong>(); + + /** + * Used for metrics where we want track a metrics (such as latency) over a + * number of operations.
+ */ + public static final ConcurrentMap<String, Pair<AtomicLong, AtomicInteger>> timeVaryingMetrics = new ConcurrentHashMap<String, Pair<AtomicLong, AtomicInteger>>(); + + public static void incrNumericMetric(String key, long amount) { + AtomicLong oldVal = numericMetrics.get(key); + if (oldVal == null) { + oldVal = numericMetrics.putIfAbsent(key, new AtomicLong(amount)); + if (oldVal == null) + return; + } + oldVal.addAndGet(amount); + } + + public static void incrTimeVaryingMetric(String key, long amount) { + Pair<AtomicLong, AtomicInteger> oldVal = timeVaryingMetrics.get(key); + if (oldVal == null) { + oldVal = timeVaryingMetrics.putIfAbsent(key, new Pair<AtomicLong, AtomicInteger>( + new AtomicLong(amount), new AtomicInteger(1))); + if (oldVal == null) + return; + } + oldVal.getFirst().addAndGet(amount); // total time + oldVal.getSecond().incrementAndGet(); // increment ops by 1 + } + + + public static void incrNumericPersistentMetric(String key, long amount) { + AtomicLong oldVal = numericPersistentMetrics.get(key); + if (oldVal == null) { + oldVal = numericPersistentMetrics.putIfAbsent(key, new AtomicLong(amount)); + if (oldVal == null) + return; + } + oldVal.addAndGet(amount); + } + + public static void setNumericMetric(String key, long amount) { + numericMetrics.put(key, new AtomicLong(amount)); + } + + public static long getNumericMetric(String key) { + AtomicLong m = numericMetrics.get(key); + if (m == null) + return 0; + return m.get(); + } + + public static Pair<Long, Integer> getTimeVaryingMetric(String key) { + Pair<AtomicLong, AtomicInteger> pair = timeVaryingMetrics.get(key); + if (pair == null) { + return new Pair<Long, Integer>(0L, 0); + } + + return new Pair<Long, Integer>(pair.getFirst().get(), pair.getSecond().get()); + } + + static long getNumericPersistentMetric(String key) { + AtomicLong m = numericPersistentMetrics.get(key); + if (m == null) + return 0; + return m.get(); + } + +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java index 3b46dd8..d373367 100644 --- 
src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java @@ -133,17 +133,17 @@ public class RegionServerDynamicMetrics implements Updater { */ public void doUpdates(MetricsContext context) { /* get dynamically created numeric metrics, and push the metrics */ - for (Entry<String, AtomicLong> entry : HRegion.numericMetrics.entrySet()) { + for (Entry<String, AtomicLong> entry : MetricsStorage.numericMetrics.entrySet()) { this.setNumericMetric(entry.getKey(), entry.getValue().getAndSet(0)); } /* get dynamically created numeric metrics, and push the metrics. * These ones aren't to be reset; they are cumulative. */ - for (Entry<String, AtomicLong> entry : HRegion.numericPersistentMetrics.entrySet()) { + for (Entry<String, AtomicLong> entry : MetricsStorage.numericPersistentMetrics.entrySet()) { this.setNumericMetric(entry.getKey(), entry.getValue().get()); } /* get dynamically created time varying metrics, and push the metrics */ for (Entry<String, Pair<AtomicLong, AtomicInteger>> entry : - HRegion.timeVaryingMetrics.entrySet()) { + MetricsStorage.timeVaryingMetrics.entrySet()) { Pair<AtomicLong, AtomicInteger> value = entry.getValue(); this.incrTimeVaryingMetric(entry.getKey(), value.getFirst().getAndSet(0), diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java index 5ad7406..4fad7bd 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java @@ -29,6 +29,9 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -39,12 +42,11 @@ import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; /** - * A collection of metric names in a given column family or a (table, column + * A names in a given column family or a (table, column * family) combination. The following "dimensions" are supported: *
    *
  • Table name (optional; enabled based on configuration)
  • @@ -303,6 +305,11 @@ public class SchemaMetrics { } } + + + public static final String METRIC_GETSIZE = "getsize"; + public static final String METRIC_NEXTSIZE = "nextsize"; + /** * Returns a {@link SchemaMetrics} object for the given table and column * family, instantiating it if necessary. @@ -366,7 +373,7 @@ public class SchemaMetrics { if (blockCategory == null) { blockCategory = BlockCategory.UNKNOWN; // So that we see this in stats. } - HRegion.incrNumericMetric(getBlockMetricName(blockCategory, + MetricsStorage.incrNumericMetric(getBlockMetricName(blockCategory, isCompaction, metricType), 1); if (blockCategory != BlockCategory.ALL_CATEGORIES) { @@ -377,7 +384,7 @@ public class SchemaMetrics { private void addToReadTime(BlockCategory blockCategory, boolean isCompaction, long timeMs) { - HRegion.incrTimeVaryingMetric(getBlockMetricName(blockCategory, + MetricsStorage.incrTimeVaryingMetric(getBlockMetricName(blockCategory, isCompaction, BlockMetricType.READ_TIME), timeMs); // Also update the read time aggregated across all block categories @@ -433,7 +440,7 @@ public class SchemaMetrics { */ public void updatePersistentStoreMetric(StoreMetricType storeMetricType, long value) { - HRegion.incrNumericPersistentMetric( + MetricsStorage.incrNumericPersistentMetric( storeMetricNames[storeMetricType.ordinal()], value); } @@ -478,7 +485,7 @@ public class SchemaMetrics { if (category == null) { category = BlockCategory.ALL_CATEGORIES; } - HRegion.incrNumericPersistentMetric(getBlockMetricName(category, false, + MetricsStorage.incrNumericPersistentMetric(getBlockMetricName(category, false, BlockMetricType.CACHE_SIZE), cacheSizeDelta); if (category != BlockCategory.ALL_CATEGORIES) { @@ -502,7 +509,7 @@ public class SchemaMetrics { * positives/negatives as specified by the argument. 
 */ public void updateBloomMetrics(boolean isInBloom) { - HRegion.incrNumericMetric(getBloomMetricName(isInBloom), 1); + MetricsStorage.incrNumericMetric(getBloomMetricName(isInBloom), 1); if (this != ALL_SCHEMA_METRICS) { ALL_SCHEMA_METRICS.updateBloomMetrics(isInBloom); } @@ -731,11 +738,11 @@ public class SchemaMetrics { long metricValue; if (isTimeVaryingKey(metricName)) { Pair<Long, Integer> totalAndCount = - HRegion.getTimeVaryingMetric(stripTimeVaryingSuffix(metricName)); + MetricsStorage.getTimeVaryingMetric(stripTimeVaryingSuffix(metricName)); metricValue = metricName.endsWith(TOTAL_SUFFIX) ? totalAndCount.getFirst() : totalAndCount.getSecond(); } else { - metricValue = HRegion.getNumericMetric(metricName); + metricValue = MetricsStorage.getNumericMetric(metricName); } metricsSnapshot.put(metricName, metricValue); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index d2fd2ff..0736928 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.regionserver.metrics.MetricsStorage; import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics. StoreMetricType; @@ -90,7 +91,7 @@ public class TestRegionServerMetrics { Long startValue = startingMetrics.get(storeMetricName); assertEquals("Invalid value for store metric " + storeMetricName + " (type " + storeMetricType + ")", expected, - HRegion.getNumericMetric(storeMetricName) + MetricsStorage.getNumericMetric(storeMetricName) - (startValue != null ? 
startValue : 0)); } @@ -130,7 +131,7 @@ public class TestRegionServerMetrics { final String storeMetricName = ALL_METRICS .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT); assertEquals("Invalid value for store metric " + storeMetricName, - NUM_FLUSHES, HRegion.getNumericMetric(storeMetricName)); + NUM_FLUSHES, MetricsStorage.getNumericMetric(storeMetricName)); } @@ -144,14 +145,14 @@ public class TestRegionServerMetrics { for (int i =0; i < cfs.length; ++i) { String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]); - String getMetric = prefix + HRegion.METRIC_GETSIZE; - String nextMetric = prefix + HRegion.METRIC_NEXTSIZE; + String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE; + String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE; // verify getsize and nextsize matches - int getSize = HRegion.numericMetrics.containsKey(getMetric) ? - HRegion.numericMetrics.get(getMetric).intValue() : 0; - int nextSize = HRegion.numericMetrics.containsKey(nextMetric) ? - HRegion.numericMetrics.get(nextMetric).intValue() : 0; + int getSize = MetricsStorage.numericMetrics.containsKey(getMetric) ? + MetricsStorage.numericMetrics.get(getMetric).intValue() : 0; + int nextSize = MetricsStorage.numericMetrics.containsKey(nextMetric) ? + MetricsStorage.numericMetrics.get(nextMetric).intValue() : 0; assertEquals(metrics[i], getSize); assertEquals(metrics[cfs.length + i], nextSize);