From 31e4715c916cdf50a280c10270b1e04d0ede743b Mon Sep 17 00:00:00 2001 From: Ashu Pachauri Date: Mon, 2 Nov 2015 17:15:11 -0800 Subject: [PATCH] HBASE-14724: Per CF request count metrics Summary: Adding per column family per region operation count metrics for the following operations: 1. Get 2. Put 3. Append 4. Delete 5. ScanNext 6. Increment When a batchMutate (Put/Delete) has an inconsistent set of column families, the name used for reporting the metrics is _UNKNOWN_. Test Plan: Unit tests. Differential Revision: https://reviews.facebook.net/D51777 --- .../hbase/regionserver/MetricsRegionWrapper.java | 38 ++++++++ .../regionserver/MetricsRegionSourceImpl.java | 39 ++++++++ .../regionserver/TestMetricsRegionSourceImpl.java | 33 +++++++ .../apache/hadoop/hbase/regionserver/HRegion.java | 107 ++++++++++++++++++++- .../regionserver/MetricsRegionWrapperImpl.java | 48 +++++++++ .../hadoop/hbase/regionserver/RSRpcServices.java | 10 ++ .../apache/hadoop/hbase/regionserver/Region.java | 20 ++++ .../regionserver/MetricsRegionWrapperStub.java | 39 ++++++++ .../hadoop/hbase/regionserver/TestHRegion.java | 55 +++++++++++ .../hbase/regionserver/TestMetricsRegion.java | 18 ++++ 10 files changed, 405 insertions(+), 2 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index 20ca9bd..eab05d5 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.regionserver; +import java.util.Map; + /** * Interface of class that will wrap an HRegion and export numbers so they can be * used in MetricsRegionSource @@ -91,4 +93,40 @@ public interface MetricsRegionWrapper { * Get the replica id of this region. 
 */ int getReplicaId(); + + /** + * Get per-column family count of Get requests. + * @return A map with column family names as keys and get request counts as values. + */ + Map getCFGetCount(); + + /** + * Get per-column family count of Put requests. + * @return A map with column family names as keys and put request counts as values. + */ + Map getCFPutCount(); + + /** + * Get per-column family count of Append requests. + * @return A map with column family names as keys and append request counts as values. + */ + Map getCFAppendCount(); + + /** + * Get per-column family count of Delete requests. + * @return A map with column family names as keys and delete request counts as values. + */ + Map getCFDeleteCount(); + + /** + * Get per-column family count of Increment requests. + * @return A map with column family names as keys and increment request counts as values. + */ + Map getCFIncrementCount(); + + /** + * Get per-column family count of Scanner#next requests. + * @return A map with column family names as keys and scanner#next request counts as values. 
+ */ + Map getCFScanNextCount(); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index fab6b51..089a387 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; @@ -46,6 +47,7 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { private final DynamicMetricsRegistry registry; private final String regionNamePrefix; + private final String regionCFPrefix; private final String regionPutKey; private final String regionDeleteKey; private final String regionGetKey; @@ -77,6 +79,11 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { "_region_" + regionWrapper.getRegionName() + "_metric_"; + regionCFPrefix = "Namespace_" + regionWrapper.getNamespace() + + "_table_" + regionWrapper.getTableName() + + "_region_" + regionWrapper.getRegionName() + + "_cf_"; + String suffix = "Count"; regionPutKey = regionNamePrefix + MetricsRegionServerSource.MUTATE_KEY + suffix; @@ -247,6 +254,38 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.REPLICA_ID, MetricsRegionSource.REPLICA_ID_DESC), this.regionWrapper.getReplicaId()); + + addCFMetrics(mrb); + } + } + + /** + * Adds the column family request counters for all operations to the given metrics builder. + * @param mrb The metrics record builder. 
+ */ + private void addCFMetrics(MetricsRecordBuilder mrb) { + addCFCounter(mrb, regionWrapper.getCFGetCount(), MetricsRegionServerSource.GET_KEY); + addCFCounter(mrb, regionWrapper.getCFPutCount(), MetricsRegionServerSource.MUTATE_KEY); + addCFCounter(mrb, regionWrapper.getCFDeleteCount(), MetricsRegionServerSource.DELETE_KEY); + addCFCounter(mrb, regionWrapper.getCFScanNextCount(), + MetricsRegionServerSource.SCAN_NEXT_KEY); + addCFCounter(mrb, regionWrapper.getCFAppendCount(), MetricsRegionServerSource.APPEND_KEY); + addCFCounter(mrb, regionWrapper.getCFIncrementCount(), + MetricsRegionServerSource.INCREMENT_KEY); + } + + /** + * Given the map of column family and request count for a given operation, add it to the metrics. + * @param mrb The metrics record builder. + * @param cfCounters The (column family name, request count) map. + * @param opPrefix The operation name for which metrics to add. + */ + private void addCFCounter(final MetricsRecordBuilder mrb, + Map cfCounters, String opPrefix) { + + for(Map.Entry entry: cfCounters.entrySet()) { + mrb.addCounter(Interns.info(regionCFPrefix + entry.getKey() + "_metric_" + opPrefix + + "Count", opPrefix + " count for column family " + entry.getKey()), entry.getValue()); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java index 19624aa..9b865bb 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java @@ -29,6 +29,9 @@ import org.apache.hadoop.hbase.testclassification.MetricsTests; import org.junit.Test; import org.junit.experimental.categories.Category; +import java.util.HashMap; +import java.util.Map; + @Category({MetricsTests.class, SmallTests.class}) public class 
TestMetricsRegionSourceImpl { @@ -143,5 +146,35 @@ public class TestMetricsRegionSourceImpl { public int getReplicaId() { return 0; } + + @Override + public Map getCFGetCount() { + return new HashMap<>(); + } + + @Override + public Map getCFPutCount() { + return new HashMap<>(); + } + + @Override + public Map getCFScanNextCount() { + return new HashMap<>(); + } + + @Override + public Map getCFAppendCount() { + return new HashMap<>(); + } + + @Override + public Map getCFIncrementCount() { + return new HashMap<>(); + } + + @Override + public Map getCFDeleteCount() { + return new HashMap<>(); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 3e6c092..9d679f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -254,6 +254,17 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi protected final Map stores = new ConcurrentSkipListMap( Bytes.BYTES_RAWCOMPARATOR); + private final Map> opsPerCF = new HashMap<>(); + private final Map opsAllCF = new HashMap<>(); + // We collect per CF num ops metrics only for these operations + protected static final Operation[] SUPPORTED_OPS_PER_CF_COUNT = {Operation.GET, Operation.PUT, + Operation.DELETE, Operation.SCAN, Operation.APPEND, Operation.INCREMENT}; + + + // The name used for the column family when batch mutate calls have an inconsistent set of column + // families. If a user defined column family has the same name, the reported metrics are off. 
+ protected static final byte[] UNKNOWN_CF = Bytes.toBytes("_UNKNOWN_"); + // TODO: account for each registered handler in HeapSize computation private Map coprocessorServiceHandlers = Maps.newHashMap(); @@ -933,12 +944,92 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi LOG.warn(e.getMessage()); } } + } else { + initializeCFCounters(); } } } return Math.max(maxSeqId, maxMemstoreTS + 1); } + /** + * Initialize per column family counters for this region. + */ + private void initializeCFCounters() { + if (opsPerCF.isEmpty()) { + for (int i = 0; i < SUPPORTED_OPS_PER_CF_COUNT.length; i++) { + opsPerCF.put(SUPPORTED_OPS_PER_CF_COUNT[i], + new ConcurrentSkipListMap(Bytes.BYTES_RAWCOMPARATOR)); + } + } + opsAllCF.clear(); + for (int i = 0; i < SUPPORTED_OPS_PER_CF_COUNT.length; i++) { + opsPerCF.get(SUPPORTED_OPS_PER_CF_COUNT[i]).clear(); + opsAllCF.put(SUPPORTED_OPS_PER_CF_COUNT[i], new Counter()); + + for (byte[] family: stores.keySet()) { + opsPerCF.get(SUPPORTED_OPS_PER_CF_COUNT[i]).put(family, new Counter()); + } + opsPerCF.get(SUPPORTED_OPS_PER_CF_COUNT[i]).put(UNKNOWN_CF, new Counter()); + } + } + + /** + * Increment request counters for the given column families and operation types. + * @param families The collection of column families. + * @param opType The operation type. + */ + private void incrementCFCounters(final Collection families, final Operation opType) { + Map metricsMap = opsPerCF.get(opType); + if (families == null) { + metricsMap.get(UNKNOWN_CF).increment(); + return; + } + + // This is just an optimization. Here, we assume that a good chunk of requests come for all + // column families. If that is the case, we should not have to iterate through all of them. 
+ if (families.isEmpty() || families.size() == stores.size()) { + opsAllCF.get(opType).increment(); + return; + } + + for (byte[] family: families) { + metricsMap.get(family).increment(); + } + } + + @Override + public void updateCFRequestCount(final Collection families, Operation op) { + incrementCFCounters(families, op); + } + + @Override + public Map getCFRequestCount(Operation op) { + return constructTotalReqCount(op); + } + + /** + * Get true values of per column family request counts for the given operation. + * We need to do this because we separately maintain request counts for operations when the + * column families in question span the whole set of column families for the table in question. + * So, we need to add this number to the already aggregated per-cf + * request count to get the true count. + * + * @param op The type of operation in question. + * @return Map containing true values for request counts per column family. + */ + private Map constructTotalReqCount(Operation op) { + Map cfCounterMap = opsPerCF.get(op); + long allCFCount = opsAllCF.get(op).get(); + + Map counters = new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR); + + for (Map.Entry entry: cfCounterMap.entrySet()) { + counters.put(entry.getKey(), new Counter(entry.getValue().get() + allCFCount)); + } + return counters; + } + private void initializeWarmup(final CancelableProgressable reporter) throws IOException { MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this); @@ -3222,12 +3313,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (this.metricsRegion != null) { this.metricsRegion.updatePut(); } + if(putsCfSetConsistent) { + incrementCFCounters(putsCfSet, Operation.PUT); + } else { + incrementCFCounters(null, Operation.PUT); + } } if (noOfDeletes > 0) { // There were some Deletes in the batch. 
if (this.metricsRegion != null) { this.metricsRegion.updateDelete(); } + if(deletesCfSetConsistent) { + incrementCFCounters(deletesCfSet, Operation.DELETE); + } else { + incrementCFCounters(null, Operation.DELETE); + } } if (!success) { for (int i = firstIndex; i < lastIndexExclusive; i++) { @@ -6669,6 +6770,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } metricsUpdateForGet(results); + incrementCFCounters(get.familySet(), Operation.GET); return results; } @@ -7023,6 +7125,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi break; } } + incrementCFCounters(mutation.getFamilyCellMap().keySet(), op); } } @@ -7338,12 +7441,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + ClassSize.ARRAY + - 45 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + + 47 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + (14 * Bytes.SIZEOF_LONG) + 5 * Bytes.SIZEOF_BOOLEAN); // woefully out of date - currently missing: - // 1 x HashMap - coprocessorServiceHandlers + // 3 x HashMap - coprocessorServiceHandlers, opsPerCF, opsAllCF // 6 x Counter - numMutationsWithoutWAL, dataInMemoryWithoutWAL, // checkAndMutateChecksPassed, checkAndMutateChecksFailed, readRequestsCount, // writeRequestsCount diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java index 2c54079..e17ca30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.Closeable; import java.io.IOException; +import java.util.HashMap; import java.util.Map; import 
java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -29,6 +30,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Counter; import org.apache.hadoop.metrics2.MetricsExecutor; @InterfaceAudience.Private @@ -137,6 +140,51 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable } @Override + public Map getCFGetCount() { + return convertCFCounters(this.region.getCFRequestCount(Region.Operation.GET)); + } + + @Override + public Map getCFPutCount() { + return convertCFCounters(this.region.getCFRequestCount(Region.Operation.PUT)); + } + + @Override + public Map getCFScanNextCount() { + return convertCFCounters(this.region.getCFRequestCount(Region.Operation.SCAN)); + } + + @Override + public Map getCFAppendCount() { + return convertCFCounters(this.region.getCFRequestCount(Region.Operation.APPEND)); + } + + @Override + public Map getCFIncrementCount() { + return convertCFCounters(this.region.getCFRequestCount(Region.Operation.INCREMENT)); + } + + @Override + public Map getCFDeleteCount() { + return convertCFCounters(this.region.getCFRequestCount(Region.Operation.DELETE)); + } + + /** + * Convert column family names from byte arrays to string, given the map for per cf counters. + * @param cfCounters A Map containing column family names as byte arrays and corresponding + * counter values + * @return Map with same counter values, with column family names changed to strings. 
+ */ + private Map convertCFCounters(final Map cfCounters) { + Map result = new HashMap<>(); + + for(Map.Entry entry: cfCounters.entrySet()) { + result.put(Bytes.toString(entry.getKey()), entry.getValue().get()); + } + return result; + } + + @Override public int getRegionHashCode() { return this.region.hashCode(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 7bcde52..d9834c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -25,6 +25,7 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -247,6 +248,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private final ConcurrentHashMap scanners = new ConcurrentHashMap(); + // A Map of scanner names and corresponding column families + private final Map> scannerFamilies = new ConcurrentHashMap<>(); /** * The lease timeout period for client scanners (milliseconds). */ @@ -373,6 +376,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, @Override public void leaseExpired() { RegionScannerHolder rsh = scanners.remove(this.scannerName); + scannerFamilies.remove(this.scannerName); if (rsh != null) { RegionScanner s = rsh.s; LOG.info("Scanner " + this.scannerName + " lease expired on region " @@ -2209,6 +2213,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, region.getCoprocessorHost().postGet(get, results); } region.metricsUpdateForGet(results); + region.updateCFRequestCount(get.familySet(), Operation.GET); return Result.create(results, get.isCheckExistenceOnly() ? 
!results.isEmpty() : null, stale); } @@ -2583,6 +2588,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } scannerId = this.scannerIdGen.incrementAndGet(); scannerName = String.valueOf(scannerId); + scannerFamilies.put(scannerName, scan.getFamilyMap().keySet()); rsh = addScanner(scannerName, scanner, region); ttl = this.scannerLeaseTimeoutPeriod; } @@ -2786,6 +2792,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, region.updateReadRequestsCount(i); long responseCellSize = context != null ? context.getResponseCellSize() : 0; region.getMetrics().updateScanNext(responseCellSize); + region.updateCFRequestCount(scannerFamilies.get(scannerName), Operation.SCAN); + if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateScannerNext(responseCellSize); } @@ -2843,6 +2851,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } rsh = scanners.remove(scannerName); + scannerFamilies.remove(scannerName); if (rsh != null) { if (context != null) { context.setCallBack(rsh.closeCallBack); @@ -2872,6 +2881,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } catch (IOException ie) { if (scannerName != null && ie instanceof NotServingRegionException) { RegionScannerHolder rsh = scanners.remove(scannerName); + scannerFamilies.remove(scannerName); if (rsh != null) { try { RegionScanner scanner = rsh.s; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 976bddb..fcbc465 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall; +import org.apache.hadoop.hbase.util.Counter; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; @@ -197,6 +198,25 @@ public interface Region extends ConfigurationObserver { */ void updateWriteRequestsCount(long i); + /** + * Increment the count for the number of operations for given column families. + * This is currently supported for the following operations: + * Get, Put, Delete, Append, Scan and Increment. + * The count for a scan is the number of times Scanner made next() calls. + * @param families Collection of column families. + * @param operation The type of operation. + */ + void updateCFRequestCount(Collection families, Operation operation); + + /** + * Get a map of the number of requests per column family for the given operation. + * This is currently supported for the following operations: + * Get, Put, Delete, Append, Scan and Increment. + * The count for a scan is the number of times Scanner made next() calls. + * @param operation The operation. 
+ */ + Map getCFRequestCount(Operation operation); + /** @return memstore size for this region, in bytes */ long getMemstoreSize(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java index 8e6dd74..95ef2fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hbase.regionserver; +import java.util.HashMap; +import java.util.Map; + public class MetricsRegionWrapperStub implements MetricsRegionWrapper { int replicaid = 0; @@ -112,4 +115,40 @@ public class MetricsRegionWrapperStub implements MetricsRegionWrapper { public int getReplicaId() { return replicaid; } + + private Map getNewMapWithValues(String key, long value) { + Map newMap = new HashMap<>(); + newMap.put(key, value); + return newMap; + } + + @Override + public Map getCFGetCount() { + return getNewMapWithValues("fam1", 10); + } + + @Override + public Map getCFPutCount() { + return getNewMapWithValues("fam1", 10); + } + + @Override + public Map getCFAppendCount() { + return getNewMapWithValues("fam1", 10); + } + + @Override + public Map getCFScanNextCount() { + return getNewMapWithValues("fam1", 10); + } + + @Override + public Map getCFIncrementCount() { + return getNewMapWithValues("fam1", 10); + } + + @Override + public Map getCFDeleteCount() { + return getNewMapWithValues("fam1", 10); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index a5574d3..5270e46 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java 
@@ -5965,6 +5965,61 @@ public class TestHRegion { } @Test + public void testCFCounters() throws Exception { + byte[] fam1 = Bytes.toBytes("fam1"); + byte[] fam2 = Bytes.toBytes("fam2"); + + byte[][] families = {fam1, fam2}; + this.region = initHRegion(tableName, method, CONF, families); + + Put put1 = new Put(row); + put1.addColumn(fam1, fam1, fam1); + region.put(put1); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.PUT).get(fam1).get()); + + Put put2 = new Put(row); + put2.addColumn(fam2, fam2, fam2); + region.batchMutate(new Put[]{put1, put2}); + Assert.assertEquals(1L, + region.getCFRequestCount(Region.Operation.PUT).get(HRegion.UNKNOWN_CF).get()); + + put1.addColumn(fam2, fam2, fam2); + region.put(put1); + Assert.assertEquals(2L, region.getCFRequestCount(Region.Operation.PUT).get(fam1).get()); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.PUT).get(fam2).get()); + + + Get getAll = new Get(row); + region.get(getAll); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.GET).get(fam1).get()); + Get get2 = new Get(row); + get2.addFamily(fam2); + region.get(get2); + Assert.assertEquals(2L, region.getCFRequestCount(Region.Operation.GET).get(fam2).get()); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.GET).get(fam1).get()); + + + Append append = new Append(row); + append.add(fam1, fam1, fam1); + region.append(append); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.APPEND).get(fam1).get()); + + Increment increment = new Increment(row); + region.increment(increment); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.INCREMENT).get(fam1).get()); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.INCREMENT).get(fam2).get()); + + + Delete delete = new Delete(row); + delete.addFamily(fam1); + region.delete(delete); + Assert.assertEquals(1L, region.getCFRequestCount(Region.Operation.DELETE).get(fam1).get()); + + 
HBaseTestingUtility.closeRegionAndWAL(this.region); + this.region = null; + } + + @Test public void testOpenRegionWrittenToWAL() throws Exception { final ServerName serverName = ServerName.valueOf("testOpenRegionWrittenToWAL", 100, 42); final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java index cc09d15..b725fe1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java @@ -52,6 +52,24 @@ public class TestMetricsRegion { HELPER.assertCounter( "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", 0, agg); + HELPER.assertCounter( + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_cf_fam1_metric_getCount", + 10, agg); + HELPER.assertCounter( + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_cf_fam1_metric_mutateCount", + 10, agg); + HELPER.assertCounter( + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_cf_fam1_metric_scanNextCount", + 10, agg); + HELPER.assertCounter( + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_cf_fam1_metric_appendCount", + 10, agg); + HELPER.assertCounter( + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_cf_fam1_metric_deleteCount", + 10, agg); + HELPER.assertCounter( + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_cf_fam1_metric_incrementCount", + 10, agg); mr.close(); // test region with replica id > 0 -- 1.9.5