diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c79f9a9..a388200 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1008,7 +1008,7 @@ public class HRegion implements HeapSize { // , Writable{
       status.setStatus("Running coprocessor post-close hooks");
       this.coprocessorHost.postClose(abort);
     }
-    this.opMetrics.closeMetrics();
+    this.opMetrics.closeMetrics(this.getRegionInfo().getEncodedName());
     status.markComplete("Closed");
     LOG.info("Closed " + this);
     return result;
@@ -1648,6 +1648,7 @@ public class HRegion implements HeapSize { // , Writable{
     checkRow(row, "getClosestRowBefore");
     startRegionOperation();
     this.readRequestsCount.increment();
+    this.opMetrics.setReadRequestCountMetrics(this.readRequestsCount.get());
     try {
       Store store = getStore(family);
       // get the closest key. (HStore.getRowKeyAtOrBefore can return null)
@@ -1694,6 +1695,7 @@ public class HRegion implements HeapSize { // , Writable{
       List<KeyValueScanner> additionalScanners) throws IOException {
     startRegionOperation();
     this.readRequestsCount.increment();
+    this.opMetrics.setReadRequestCountMetrics(this.readRequestsCount.get());
     try {
       // Verify families are all valid
       prepareScanner(scan);
@@ -1761,6 +1763,7 @@ public class HRegion implements HeapSize { // , Writable{
     Integer lid = null;
     startRegionOperation();
     this.writeRequestsCount.increment();
+    this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
     try {
       byte [] row = delete.getRow();
       // If we did not pass an existing row lock, obtain a new one
@@ -1953,6 +1956,7 @@ public class HRegion implements HeapSize { // , Writable{
     checkResources();
     startRegionOperation();
     this.writeRequestsCount.increment();
+    this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
     try {
       // We obtain a per-row lock, so other clients will block while one client
       // performs an update. The read lock is released by the client calling
@@ -2053,6 +2057,7 @@ public class HRegion implements HeapSize { // , Writable{
     try {
       if (!initialized) {
         this.writeRequestsCount.increment();
+        this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
         doPreMutationHook(batchOp);
         initialized = true;
       }
@@ -2463,6 +2468,7 @@ public class HRegion implements HeapSize { // , Writable{
 
     startRegionOperation();
     this.writeRequestsCount.increment();
+    this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
     try {
       RowLock lock = isPut ? ((Put)w).getRowLock() : ((Delete)w).getRowLock();
       Get get = new Get(row, lock);
@@ -3222,6 +3228,7 @@ public class HRegion implements HeapSize { // , Writable{
   public Integer obtainRowLock(final byte [] row) throws IOException {
     startRegionOperation();
     this.writeRequestsCount.increment();
+    this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
     try {
       return internalObtainRowLock(row, true);
     } finally {
@@ -3394,6 +3401,7 @@ public class HRegion implements HeapSize { // , Writable{
     startBulkRegionOperation(hasMultipleColumnFamilies(familyPaths));
     try {
       this.writeRequestsCount.increment();
+      this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
       // There possibly was a split that happend between when the split keys
       // were gathered and before the HReiogn's write lock was taken.  We need
@@ -3615,6 +3623,7 @@ public class HRegion implements HeapSize { // , Writable{
       }
       startRegionOperation();
       readRequestsCount.increment();
+      opMetrics.setReadRequestCountMetrics(readRequestsCount.get());
       try {
 
         // This could be a new thread from the last time we called next().
@@ -4422,8 +4431,14 @@ public class HRegion implements HeapSize { // , Writable{
     }
     HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf,
         newRegionInfo, a.getTableDesc(), null);
-    dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
-    dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
+    long totalReadRequestCount = a.readRequestsCount.get() + b.readRequestsCount.get();
+    dstRegion.readRequestsCount.set(totalReadRequestCount);
+    dstRegion.opMetrics.setReadRequestCountMetrics(totalReadRequestCount);
+
+    long totalWriteRequestCount = a.writeRequestsCount.get() + b.writeRequestsCount.get();
+    dstRegion.writeRequestsCount.set(totalWriteRequestCount);
+    dstRegion.opMetrics.setWriteRequestCountMetrics(totalWriteRequestCount);
+
     dstRegion.initialize();
     dstRegion.compactStores();
     if (LOG.isDebugEnabled()) {
@@ -4812,6 +4827,7 @@ public class HRegion implements HeapSize { // , Writable{
     // Lock row
     startRegionOperation();
     this.writeRequestsCount.increment();
+    this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
     try {
       Integer lid = getLock(lockid, row, true);
       lock(this.updatesLock.readLock());
@@ -4981,6 +4997,7 @@ public class HRegion implements HeapSize { // , Writable{
     // Lock row
     startRegionOperation();
     this.writeRequestsCount.increment();
+    this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
     try {
       Integer lid = getLock(lockid, row, true);
       lock(this.updatesLock.readLock());
@@ -5099,6 +5116,7 @@ public class HRegion implements HeapSize { // , Writable{
     long result = amount;
     startRegionOperation();
     this.writeRequestsCount.increment();
+    this.opMetrics.setWriteRequestCountMetrics(this.writeRequestsCount.get());
     try {
       Integer lid = obtainRowLock(row);
       lock(this.updatesLock.readLock());
@@ -5484,6 +5502,22 @@ public class HRegion implements HeapSize { // , Writable{
     return coprocessorHost;
   }
 
+  /**
+   * Set the read request count defined in opMetrics.
+   * @param value absolute value of the read request count
+   */
+  public void setOpMetricsReadRequestCount(long value) {
+    this.opMetrics.setReadRequestCountMetrics(value);
+  }
+
+  /**
+   * Set the write request count defined in opMetrics.
+   * @param value absolute value of the write request count
+   */
+  public void setOpMetricsWriteRequestCount(long value) {
+    this.opMetrics.setWriteRequestCountMetrics(value);
+  }
+
   /** @param coprocessorHost the new coprocessor host */
   public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) {
     this.coprocessorHost = coprocessorHost;
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 86d1337..6db8817 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2495,6 +2495,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
       }
       requestCount.addAndGet(i);
       region.readRequestsCount.add(i);
+      region.setOpMetricsReadRequestCount(region.readRequestsCount.get());
     } finally {
       region.closeRegionOperation();
     }
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index 044c8f0..cae5d48 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -700,8 +700,12 @@ public class SplitTransaction {
     HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
         this.parent.getLog(), fs, this.parent.getConf(),
         hri, this.parent.getTableDesc(), rsServices);
-    r.readRequestsCount.set(this.parent.getReadRequestsCount() / 2);
-    r.writeRequestsCount.set(this.parent.getWriteRequestsCount() / 2);
+    long halfParentReadRequestCount = this.parent.getReadRequestsCount() / 2;
+    r.readRequestsCount.set(halfParentReadRequestCount);
+    r.setOpMetricsReadRequestCount(halfParentReadRequestCount);
+    long halfParentWriteRequestCount = this.parent.getWriteRequestsCount() / 2;
+    r.writeRequestsCount.set(halfParentWriteRequestCount);
+    r.setOpMetricsWriteRequestCount(halfParentWriteRequestCount);
     HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir());
     return r;
   }
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java b/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java
index eacb702..9e99857 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java
@@ -46,6 +46,8 @@ public class OperationMetrics {
   private static final String MULTIPUT_KEY = "multiput_";
   private static final String MULTIDELETE_KEY = "multidelete_";
   private static final String APPEND_KEY = "append_";
+  private static final String READREQUESTCOUNT_KEY = "readrequestcount";
+  private static final String WRITEREQUESTCOUNT_KEY = "writerequestcount";
 
   /** Conf key controlling whether we should expose metrics.*/
   private static final String CONF_KEY =
@@ -98,6 +100,27 @@ public class OperationMetrics {
     this(null, null);
   }
 
+  /**
+   * Set the read request count that is to be exposed to the
+   * hadoop metrics framework.
+   * @param value absolute value of the read request count
+   */
+  public void setReadRequestCountMetrics(long value) {
+    doSetNumericPersistentMetrics(READREQUESTCOUNT_KEY, value);
+  }
+
+  /**
+   * Set the write request count that is to be exposed to the
+   * hadoop metrics framework.
+   * @param value absolute value of the write request count
+   */
+  public void setWriteRequestCountMetrics(long value) {
+    doSetNumericPersistentMetrics(WRITEREQUESTCOUNT_KEY, value);
+  }
+
+  private void doSetNumericPersistentMetrics(String key, long value) {
+    RegionMetricsStorage.setNumericPersistentMetric(this.regionMetrixPrefix + key, value);
+  }
 
   /**
    * Update the stats associated with {@link HTable#put(java.util.List)}.
@@ -190,11 +213,15 @@ public class OperationMetrics {
     doUpdateTimeVarying(columnFamilies, DELETE_KEY, value);
   }
 
+
+
   /**
-   * This deletes all old metrics this instance has ever created or updated.
+   * This deletes all old non-persistent metrics this instance has ever created or updated.
+   * For persistent metrics, only the metrics of the region being closed are deleted.
+   * @param regionEncodedName the encoded name of the region being closed
    */
-  public void closeMetrics() {
-    RegionMetricsStorage.clear();
+  public void closeMetrics(String regionEncodedName) {
+    RegionMetricsStorage.clear(regionEncodedName);
   }
 
   /**
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java b/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java
index 3239540..515ffe0 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java
@@ -20,11 +20,13 @@
 package org.apache.hadoop.hbase.regionserver.metrics;
 
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -88,6 +90,9 @@ public class RegionMetricsStorage {
     oldVal.getSecond().incrementAndGet(); // increment ops by 1
   }
 
+  public static void setNumericPersistentMetric(String key, long amount) {
+    numericPersistentMetrics.put(key, new AtomicLong(amount));
+  }
   public static void incrNumericPersistentMetric(String key, long amount) {
     AtomicLong oldVal = numericPersistentMetrics.get(key);
     if (oldVal == null) {
@@ -126,11 +131,16 @@ public class RegionMetricsStorage {
   }
 
   /**
-   * Clear all copies of the metrics this stores.
+   * Clear the time-varying and numeric metrics for all regions in this region server.
+   * Clear the numeric persistent metrics only for the region being closed.
    */
-  public static void clear() {
+  public static void clear(String regionEncodedName) {
     timeVaryingMetrics.clear();
     numericMetrics.clear();
-    numericPersistentMetrics.clear();
+    for (Entry<String, AtomicLong> entry : numericPersistentMetrics.entrySet()) {
+      if (entry.getKey().contains(regionEncodedName)) {
+        numericPersistentMetrics.remove(entry.getKey());
+      }
+    }
   }
 }
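Illustrative note (not part of the patch): the sketch below is a minimal, self-contained mock of the storage behaviour the patch relies on. Persistent metrics are written under a per-region key as absolute values, and closing a region removes only the entries whose keys contain that region's encoded name. The class name and the key strings are hypothetical stand-ins; the real prefix is the regionMetrixPrefix built inside OperationMetrics.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class PersistentMetricsSketch {

  // Stand-in for RegionMetricsStorage.numericPersistentMetrics.
  private static final ConcurrentMap<String, AtomicLong> numericPersistentMetrics =
      new ConcurrentHashMap<String, AtomicLong>();

  // Mirrors setNumericPersistentMetric: store an absolute value, overwriting any old one.
  static void setNumericPersistentMetric(String key, long amount) {
    numericPersistentMetrics.put(key, new AtomicLong(amount));
  }

  // Mirrors the selective clear: drop only keys that embed the closing region's encoded name.
  static void clear(String regionEncodedName) {
    for (Map.Entry<String, AtomicLong> entry : numericPersistentMetrics.entrySet()) {
      if (entry.getKey().contains(regionEncodedName)) {
        numericPersistentMetrics.remove(entry.getKey());
      }
    }
  }

  public static void main(String[] args) {
    // Hypothetical keys for two regions of table t1.
    setNumericPersistentMetric("tbl.t1.region.1028785192.readrequestcount", 42L);
    setNumericPersistentMetric("tbl.t1.region.1028785192.writerequestcount", 7L);
    setNumericPersistentMetric("tbl.t1.region.99e2430a.readrequestcount", 5L);

    clear("1028785192");  // region 1028785192 is being closed

    // Only the surviving region's metric remains.
    System.out.println(numericPersistentMetrics.keySet());
    // prints: [tbl.t1.region.99e2430a.readrequestcount]
  }
}

Because the backing map is a ConcurrentHashMap, its iterators are weakly consistent, so removing entries while iterating (as the selective clear does) needs no extra locking.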