diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7ab3e61..bc83764 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -38,6 +38,7 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.NavigableSet;
 import java.util.Random;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.Callable;
@@ -109,6 +110,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.metrics.RegionOperationMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -336,6 +338,7 @@ public class HRegion implements HeapSize { // , Writable{
   public final static String REGIONINFO_FILE = ".regioninfo";
   private HTableDescriptor htableDescriptor = null;
   private RegionSplitPolicy splitPolicy;
+  private RegionOperationMetrics regionMetrics;
 
   /**
    * Should only be used for testing purposes
@@ -358,6 +361,11 @@ public class HRegion implements HeapSize { // , Writable{
     this.threadWakeFrequency = 0L;
     this.coprocessorHost = null;
     this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
+
+    // Configure SchemaMetrics before trying to create a RegionOperationMetrics instance as
+    // RegionOperationMetrics relies on SchemaMetrics to do naming.
+    SchemaMetrics.configureGlobally(conf);
+    this.regionMetrics = new RegionOperationMetrics(SchemaMetrics.UNKNOWN, SchemaMetrics.UNKNOWN);
   }
 
   /**
@@ -419,6 +427,11 @@ public class HRegion implements HeapSize { // , Writable{
     setHTableSpecificConf();
     this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
     this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
+
+    // Configure SchemaMetrics before trying to create a RegionOperationMetrics instance as
+    // RegionOperationMetrics relies on SchemaMetrics to do naming.
+    SchemaMetrics.configureGlobally(conf);
+    this.regionMetrics = new RegionOperationMetrics(this.regionInfo.getTableNameAsString(), this.regionInfo.getEncodedName());
 
     /*
      * timestamp.slop provides a server-side constraint on the timestamp. This
@@ -1808,11 +1821,7 @@ public class HRegion implements HeapSize { // , Writable{
       coprocessorHost.postDelete(delete, walEdit, writeToWAL);
     }
     final long after = EnvironmentEdgeManager.currentTimeMillis();
-    final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
-        getTableDesc().getNameAsString(), familyMap.keySet());
-    if (!metricPrefix.isEmpty()) {
-      RegionMetricsStorage.incrTimeVaryingMetric(metricPrefix + "delete_", after - now);
-    }
+    this.regionMetrics.updateDeleteMetrics(familyMap.keySet(), after - now);
 
     if (flush) {
       // Request a cache flush. Do it outside update lock.
@@ -1958,11 +1967,12 @@ public class HRegion implements HeapSize { // , Writable{
   @SuppressWarnings("unchecked")
   private long doMiniBatchPut(
       BatchOperationInProgress<Pair<Put, Integer>> batchOp) throws IOException {
-    String metricPrefix = null;
+    final String tableName = getTableDesc().getNameAsString();
 
     // variable to note if all Put items are for the same CF -- metrics related
     boolean cfSetConsistent = true;
+    Set<byte[]> cfSet = null;
     long startTimeMs = EnvironmentEdgeManager.currentTimeMillis();
 
     WALEdit walEdit = new WALEdit();
 
@@ -2042,19 +2052,10 @@ public class HRegion implements HeapSize { // , Writable{
         lastIndexExclusive++;
         numReadyToWrite++;
 
-        // If first time around, designate a prefix for metrics based on the CF
-        // set. After that, watch for inconsistencies.
-        final String curMetricPrefix =
-            SchemaMetrics.generateSchemaMetricsPrefix(tableName,
-                put.getFamilyMap().keySet());
-
-        if (metricPrefix == null) {
-          metricPrefix = curMetricPrefix;
-        } else if (cfSetConsistent && !metricPrefix.equals(curMetricPrefix)) {
-          // The column family set for this batch put is undefined.
-          cfSetConsistent = false;
-          metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName,
-              SchemaMetrics.UNKNOWN);
+        if (cfSet == null) {
+          cfSet = put.getFamilyMap().keySet();
+        } else {
+          cfSetConsistent = cfSetConsistent && put.getFamilyMap().keySet().equals(cfSet);
         }
       }
 
@@ -2199,11 +2200,12 @@ public class HRegion implements HeapSize { // , Writable{
 
     // do after lock
     final long endTimeMs = EnvironmentEdgeManager.currentTimeMillis();
-    if (metricPrefix == null) {
-      metricPrefix = SchemaMetrics.CF_BAD_FAMILY_PREFIX;
-    }
-    RegionMetricsStorage.incrTimeVaryingMetric(metricPrefix + "multiput_",
-        endTimeMs - startTimeMs);
+
+    // See if the column families were consistent through the whole batch.
+    // If they were, keep them; if not, pass null, which is treated as
+    // unknown.
+    final Set<byte[]> keptCfs = cfSetConsistent ? cfSet : null;
+    this.regionMetrics.updateMultiPutMetrics(keptCfs, endTimeMs - startTimeMs);
 
     if (!success) {
       for (int i = firstIndex; i < lastIndexExclusive; i++) {
@@ -2458,11 +2460,8 @@ public class HRegion implements HeapSize { // , Writable{
 
     // do after lock
     final long after = EnvironmentEdgeManager.currentTimeMillis();
-    final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
-        this.getTableDesc().getNameAsString(), familyMap.keySet());
-    if (!metricPrefix.isEmpty()) {
-      RegionMetricsStorage.incrTimeVaryingMetric(metricPrefix + "put_", after - now);
-    }
+    this.regionMetrics.updatePutMetrics(familyMap.keySet(), after - now);
+
     if (flush) {
       // Request a cache flush. Do it outside update lock.
       requestFlush();
@@ -4089,11 +4088,7 @@ public class HRegion implements HeapSize { // , Writable{
 
     // do after lock
     final long after = EnvironmentEdgeManager.currentTimeMillis();
-    final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
-        this.getTableDesc().getNameAsString(), get.familySet());
-    if (!metricPrefix.isEmpty()) {
-      RegionMetricsStorage.incrTimeVaryingMetric(metricPrefix + "get_", after - now);
-    }
+    this.regionMetrics.updateGetMetrics(get.familySet(), after - now);
 
     return results;
   }
@@ -4494,11 +4489,16 @@ public class HRegion implements HeapSize { // , Writable{
     } finally {
       closeRegionOperation();
     }
-
+
+    long after = EnvironmentEdgeManager.currentTimeMillis();
+    this.regionMetrics.updateAppendMetrics(append.getFamilyMap().keySet(), after - now);
+
+
     if (flush) {
       // Request a cache flush. Do it outside update lock.
       requestFlush();
     }
+
     return append.isReturnResults() ?
         new Result(allKVs) : null;
   }
@@ -4605,7 +4605,10 @@ public class HRegion implements HeapSize { // , Writable{
     } finally {
       closeRegionOperation();
     }
-
+
+    long after = EnvironmentEdgeManager.currentTimeMillis();
+    this.regionMetrics.updateIncrementMetrics(increment.getFamilyMap().keySet(), after - now);
+
     if (flush) {
       // Request a cache flush. Do it outside update lock.
       requestFlush();
@@ -4702,10 +4705,8 @@ public class HRegion implements HeapSize { // , Writable{
 
     // do after lock
     long after = EnvironmentEdgeManager.currentTimeMillis();
-    String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
-        getTableDesc().getName(), family);
-    RegionMetricsStorage.incrTimeVaryingMetric(metricPrefix + "increment_", after - before);
-
+    this.regionMetrics.updateIncrementColumnValueMetrics(family, after - before);
+
     if (flush) {
       // Request a cache flush. Do it outside update lock.
       requestFlush();
@@ -4734,7 +4735,7 @@ public class HRegion implements HeapSize { // , Writable{
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT +
       ClassSize.ARRAY +
-      32 * ClassSize.REFERENCE + Bytes.SIZEOF_INT +
+      33 * ClassSize.REFERENCE + Bytes.SIZEOF_INT +
       (6 * Bytes.SIZEOF_LONG) +
       Bytes.SIZEOF_BOOLEAN);
 
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionOperationMetrics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionOperationMetrics.java
new file mode 100644
index 0000000..9727767
--- /dev/null
+++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionOperationMetrics.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.metrics;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This class provides a simplified interface to expose time varying metrics
+ * about GET/PUT/DELETE/MULTIPUT/ICV/INCREMENT/APPEND operations on a region.
+ * All metrics are stored in {@link RegionMetricsStorage} and exposed to hadoop
+ * metrics through {@link RegionServerDynamicMetrics}.
+ */
+@InterfaceAudience.Private
+public class RegionOperationMetrics {
+
+  private static final String DELETE_KEY = "delete_";
+  private static final String PUT_KEY = "put_";
+  private static final String GET_KEY = "get_";
+  private static final String ICV_KEY = "incrementColumnValue_";
+  private static final String INCREMENT_KEY = "increment_";
+  private static final String MULTIPUT_KEY = "multiput_";
+  private static final String APPEND_KEY = "append_";
+
+  private String tableName = null;
+  private String regionName = null;
+  private String regionMetricsPrefix = null;
+
+  /**
+   * Create a new RegionOperationMetrics object.
+   *
+   * @param tableName
+   *          the name of the table
+   * @param regionName
+   *          The name of the region metrics are being reported for. Pass in the
+   *          encoded region name to make sure that the names don't get too
+   *          large.
+   */
+  public RegionOperationMetrics(String tableName, String regionName) {
+    this.tableName = tableName;
+    this.regionName = regionName;
+    this.regionMetricsPrefix =
+        SchemaMetrics.generateRegionMetricsPrefix(this.tableName, this.regionName);
+  }
+
+  /**
+   * Update the stats associated with multiputs.
+   *
+   * @param columnFamilies
+   *          Set of CF's this multiput is associated with
+   * @param value
+   *          the time
+   */
+  public void updateMultiPutMetrics(Set<byte[]> columnFamilies, long value) {
+    doUpdateTimeVarying(columnFamilies, MULTIPUT_KEY, value);
+  }
+
+  /**
+   * Update the metrics associated with gets.
+   *
+   * @param columnFamilies
+   *          Set of Column Families in this get.
+   * @param value
+   *          the time
+   */
+  public void updateGetMetrics(Set<byte[]> columnFamilies, long value) {
+    doUpdateTimeVarying(columnFamilies, GET_KEY, value);
+  }
+
+  /** Update the metrics associated with increments. */
+  public void updateIncrementMetrics(Set<byte[]> columnFamilies, long value) {
+    doUpdateTimeVarying(columnFamilies, INCREMENT_KEY, value);
+  }
+
+  /** Update the metrics associated with appends. */
+  public void updateAppendMetrics(Set<byte[]> columnFamilies, long value) {
+    doUpdateTimeVarying(columnFamilies, APPEND_KEY, value);
+  }
+
+  /**
+   * Update the metrics associated with increment column value (ICV) operations.
+   *
+   * @param columnFamily
+   *          The single column family associated with an ICV
+   * @param value
+   *          the time
+   */
+  public void updateIncrementColumnValueMetrics(byte[] columnFamily, long value) {
+    String cfMetricPrefix =
+        SchemaMetrics.generateSchemaMetricsPrefix(this.tableName, Bytes.toString(columnFamily));
+    doSafeIncTimeVarying(cfMetricPrefix, ICV_KEY, value);
+    doSafeIncTimeVarying(this.regionMetricsPrefix, ICV_KEY, value);
+  }
+
+  /**
+   * Update metrics associated with a put.
+   *
+   * @param columnFamilies
+   *          Set of column families involved.
+   * @param value
+   *          the time.
+   */
+  public void updatePutMetrics(Set<byte[]> columnFamilies, long value) {
+    doUpdateTimeVarying(columnFamilies, PUT_KEY, value);
+  }
+
+  /**
+   * Update delete metrics.
+   *
+   * @param columnFamilies
+   *          Set of column families involved.
+   * @param value
+   *          the time.
+   */
+  public void updateDeleteMetrics(Set<byte[]> columnFamilies, long value) {
+    doUpdateTimeVarying(columnFamilies, DELETE_KEY, value);
+  }
+
+  /**
+   * Method to send updates for cf and region metrics. This is the normal method
+   * used if the naming of stats and CF's are in line with put/delete/multiput.
+   *
+   * @param columnFamilies
+   *          the set of column families involved.
+   * @param key
+   *          the metric name.
+   * @param value
+   *          the time.
+   */
+  private void doUpdateTimeVarying(Set<byte[]> columnFamilies, String key, long value) {
+    String cfMetricPrefix = null;
+    if (columnFamilies != null) {
+      cfMetricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, columnFamilies);
+    } else {
+      cfMetricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, SchemaMetrics.UNKNOWN);
+    }
+
+    doSafeIncTimeVarying(cfMetricPrefix, key, value);
+    doSafeIncTimeVarying(this.regionMetricsPrefix, key, value);
+  }
+
+  private void doSafeIncTimeVarying(String prefix, String key, long value) {
+    if (prefix != null && !prefix.isEmpty() && key != null && !key.isEmpty()) {
+      RegionMetricsStorage.incrTimeVaryingMetric(prefix + key, value);
+    }
+  }
+
+}
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
index ee938cc..01cbac3 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
@@ -172,6 +172,7 @@ public class SchemaMetrics {
   private static final String TABLE_PREFIX = "tbl.";
   public static final String CF_PREFIX = "cf.";
   public static final String BLOCK_TYPE_PREFIX = "bt.";
+  public static final String REGION_PREFIX = "region.";
 
   public static final String CF_UNKNOWN_PREFIX = CF_PREFIX + UNKNOWN + ".";
   public static final String CF_BAD_FAMILY_PREFIX = CF_PREFIX + "__badfamily.";
@@ -622,6 +623,23 @@ public class SchemaMetrics {
     return SchemaMetrics.generateSchemaMetricsPrefix(tableName, sb.toString());
   }
 
+  /**
+   * Get the prefix for metrics generated about a single region.
+   *
+   * @param tableName the table name or {@link #TOTAL_KEY} for all tables
+   * @param regionName the region name (typically the encoded name) or
+   *          {@link #TOTAL_KEY} for all regions
+   * @return the prefix for this table/region combination.
+   */
+  static String generateRegionMetricsPrefix(String tableName, String regionName) {
+    tableName = getEffectiveTableName(tableName);
+    String schemaMetricPrefix =
+        tableName.equals(TOTAL_KEY) ? "" : TABLE_PREFIX + tableName + ".";
+    schemaMetricPrefix +=
+        regionName.equals(TOTAL_KEY) ? "" : REGION_PREFIX + regionName + ".";
+
+    return schemaMetricPrefix;
+  }
 
   /**
    * Sets the flag of whether to use table name in metric names. This flag
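
For reference, a minimal sketch of how a caller could drive the new RegionOperationMetrics API. This is not part of the patch: the table name "usertable", the region name "1588230740", and the metric-name strings in the comments are illustrative, and the exact prefixes depend on how SchemaMetrics is configured.

import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.metrics.RegionOperationMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionOperationMetricsExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // SchemaMetrics must be configured before the metrics object is created,
    // since it drives the naming of the generated metrics.
    SchemaMetrics.configureGlobally(conf);

    // "usertable" and "1588230740" are made-up table/region names.
    RegionOperationMetrics metrics =
        new RegionOperationMetrics("usertable", "1588230740");

    Set<byte[]> families = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    families.add(Bytes.toBytes("info"));

    // Records a 12 ms get under both the per-CF prefix and the per-region
    // prefix (e.g. "tbl.usertable.cf.info." and "tbl.usertable.region.1588230740.",
    // depending on configuration), with the "get_" key appended.
    metrics.updateGetMetrics(families, 12L);

    // A null column family set is treated as unknown, which is what HRegion
    // now passes for a multiput whose puts touch differing CF sets.
    metrics.updateMultiPutMetrics(null, 45L);
  }
}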
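
The CF-set consistency check that doMiniBatchPut now performs can also be exercised on its own. The sketch below mirrors that logic outside HRegion; the batch contents and the cfs() helper are made up for illustration.

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.util.Bytes;

public class CfConsistencyExample {
  public static void main(String[] args) {
    // Each element stands for the CF key set of one Put in the batch.
    List<Set<byte[]>> batch = Arrays.asList(
        cfs("info"), cfs("info"), cfs("info", "meta"));

    Set<byte[]> cfSet = null;
    boolean cfSetConsistent = true;
    for (Set<byte[]> putCfs : batch) {
      if (cfSet == null) {
        cfSet = putCfs;
      } else {
        cfSetConsistent = cfSetConsistent && putCfs.equals(cfSet);
      }
    }

    // Inconsistent batch: pass null so the metric falls back to the unknown CF.
    Set<byte[]> keptCfs = cfSetConsistent ? cfSet : null;
    System.out.println("keptCfs = " + keptCfs); // prints null for this batch
  }

  private static Set<byte[]> cfs(String... names) {
    Set<byte[]> set = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (String name : names) {
      set.add(Bytes.toBytes(name));
    }
    return set;
  }
}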