Index: src/main/java/org/apache/hadoop/hbase/HServerLoad.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/HServerLoad.java (revision 1081925)
+++ src/main/java/org/apache/hadoop/hbase/HServerLoad.java (working copy)
@@ -65,8 +67,10 @@
     private int memstoreSizeMB;
     /** the current total size of storefile indexes for the region, in MB */
     private int storefileIndexSizeMB;
-    /** the current total request made to region */
-    private long requestsCount;
+    /** the current total read requests made to region */
+    private long readRequestsCount;
+    /** the current total write requests made to region */
+    private long writeRequestsCount;
 
     /**
      * Constructor, for Writable
@@ -82,19 +86,21 @@
      * @param storefileSizeMB
      * @param memstoreSizeMB
      * @param storefileIndexSizeMB
-     * @param requestsCount
+     * @param readRequestsCount
+     * @param writeRequestsCount
      */
     public RegionLoad(final byte[] name, final int stores,
         final int storefiles, final int storefileSizeMB,
         final int memstoreSizeMB, final int storefileIndexSizeMB,
-        final long requestsCount) {
+        final long readRequestsCount, final long writeRequestsCount) {
       this.name = name;
       this.stores = stores;
       this.storefiles = storefiles;
       this.storefileSizeMB = storefileSizeMB;
       this.memstoreSizeMB = memstoreSizeMB;
       this.storefileIndexSizeMB = storefileIndexSizeMB;
-      this.requestsCount = requestsCount;
+      this.readRequestsCount = readRequestsCount;
+      this.writeRequestsCount = writeRequestsCount;
     }
 
     // Getters
@@ -147,14 +153,28 @@
     public int getStorefileIndexSizeMB() {
       return storefileIndexSizeMB;
     }
-    
+
     /**
      * @return the number of requests made to region
      */
     public long getRequestsCount() {
-      return requestsCount;
+      return readRequestsCount + writeRequestsCount;
     }
 
+    /**
+     * @return the number of read requests made to region
+     */
+    public long getReadRequestsCount() {
+      return readRequestsCount;
+    }
+
+    /**
+     * @return the number of write requests made to region
+     */
+    public long getWriteRequestsCount() {
+      return writeRequestsCount;
+    }
+
     // Setters
 
     /**
@@ -194,12 +214,19 @@
     }
 
     /**
-     * @param requestsCount the number of requests to region
+     * @param requestsCount the number of read requests to region
      */
-    public void setRequestsCount(long requestsCount) {
-      this.requestsCount = requestsCount;
+    public void setReadRequestsCount(long requestsCount) {
+      this.readRequestsCount = requestsCount;
     }
 
+    /**
+     * @param requestsCount the number of write requests to region
+     */
+    public void setWriteRequestsCount(long requestsCount) {
+      this.writeRequestsCount = requestsCount;
+    }
+
     // Writable
     public void readFields(DataInput in) throws IOException {
       int namelen = in.readInt();
@@ -210,7 +237,8 @@
       this.storefileSizeMB = in.readInt();
       this.memstoreSizeMB = in.readInt();
       this.storefileIndexSizeMB = in.readInt();
-      this.requestsCount = in.readLong();
+      this.readRequestsCount = in.readLong();
+      this.writeRequestsCount = in.readLong();
     }
 
     public void write(DataOutput out) throws IOException {
@@ -221,7 +249,8 @@
       out.writeInt(storefileSizeMB);
       out.writeInt(memstoreSizeMB);
       out.writeInt(storefileIndexSizeMB);
-      out.writeLong(requestsCount);
+      out.writeLong(readRequestsCount);
+      out.writeLong(writeRequestsCount);
     }
 
     /**
@@ -239,8 +268,10 @@
         Integer.valueOf(this.memstoreSizeMB));
       sb = Strings.appendKeyValue(sb, "storefileIndexSizeMB",
         Integer.valueOf(this.storefileIndexSizeMB));
-      sb = Strings.appendKeyValue(sb, "requestsCount",
-        Long.valueOf(this.requestsCount));
+      sb = Strings.appendKeyValue(sb, "readRequestsCount",
+        Long.valueOf(this.readRequestsCount));
+      sb = Strings.appendKeyValue(sb, "writeRequestsCount",
+        Long.valueOf(this.writeRequestsCount));
       return sb.toString();
     }
   }
@@ -482,9 +513,9 @@
   public void addRegionInfo(final byte[] name, final int stores,
       final int storefiles, final int storefileSizeMB,
       final int memstoreSizeMB, final int storefileIndexSizeMB,
-      final long requestsCount) {
+      final long readRequestsCount, final long writeRequestsCount) {
     this.regionLoad.add(new HServerLoad.RegionLoad(name, stores, storefiles,
-      storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB, requestsCount));
+      storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB, readRequestsCount, writeRequestsCount));
   }
 
   // Writable
Index: src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (revision 1081925)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (working copy)
@@ -543,7 +543,8 @@
     HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
       this.parent.getLog(), fs, this.parent.getConf(),
       hri, rsServices);
-    r.requestsCount.set(this.parent.getRequestsCount() / 2);
+    r.readRequestsCount.set(this.parent.getReadRequestsCount() / 2);
+    r.writeRequestsCount.set(this.parent.getWriteRequestsCount() / 2);
     HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir());
     return r;
   }
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 1081925)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy)
@@ -60,7 +60,6 @@
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
@@ -75,6 +74,7 @@
 import org.apache.hadoop.hbase.UnknownRowLockException;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.YouAreDeadException;
+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.RootLocationEditor;
@@ -901,7 +901,6 @@
     int storefileSizeMB = 0;
     int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024);
     int storefileIndexSizeMB = 0;
-    long requestsCount = r.requestsCount.get();
     synchronized (r.stores) {
       stores += r.stores.size();
       for (Store store : r.stores.values()) {
@@ -911,7 +910,8 @@
       }
     }
     return new HServerLoad.RegionLoad(name,stores, storefiles,
-      storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB, requestsCount);
+      storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB,
+      r.readRequestsCount.get(), r.writeRequestsCount.get());
   }
 
   /**
@@ -1150,13 +1150,15 @@
     int stores = 0;
     int storefiles = 0;
     long memstoreSize = 0;
-    long requestsCount = 0;
+    long readRequestsCount = 0;
+    long writeRequestsCount = 0;
     long storefileIndexSize = 0;
     synchronized (this.onlineRegions) {
       for (Map.Entry e : this.onlineRegions.entrySet()) {
         HRegion r = e.getValue();
         memstoreSize += r.memstoreSize.get();
-        requestsCount += r.requestsCount.get();
+        readRequestsCount += r.readRequestsCount.get();
+        writeRequestsCount += r.writeRequestsCount.get();
         synchronized (r.stores) {
           stores += r.stores.size();
           for (Map.Entry ee : r.stores.entrySet()) {
@@ -1170,7 +1172,8 @@
     this.metrics.stores.set(stores);
     this.metrics.storefiles.set(storefiles);
     this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
-    this.metrics.requestsCount.set(requestsCount);
+    this.metrics.readRequestsCount.set(readRequestsCount);
+    this.metrics.writeRequestsCount.set(writeRequestsCount);
     this.metrics.storefileIndexSizeMB
       .set((int) (storefileIndexSize / (1024 * 1024)));
     this.metrics.compactionQueueSize.set(compactSplitThread
Index: src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java (revision 1081925)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java (working copy)
@@ -128,11 +128,16 @@
   public final MetricsIntValue storefiles = new MetricsIntValue("storefiles", registry);
 
   /**
-   * Count of requests
+   * Count of read requests
   */
-  public final MetricsLongValue requestsCount = new MetricsLongValue("requestsCount", registry);
+  public final MetricsLongValue readRequestsCount = new MetricsLongValue("readRequestsCount", registry);
 
   /**
+   * Count of write requests
+   */
+  public final MetricsLongValue writeRequestsCount = new MetricsLongValue("writeRequestsCount", registry);
+
+  /**
   * Sum of all the storefile index sizes in this regionserver in MB
   */
  public final MetricsIntValue storefileIndexSizeMB =
@@ -248,7 +253,8 @@
     this.storefiles.pushMetric(this.metricsRecord);
     this.storefileIndexSizeMB.pushMetric(this.metricsRecord);
     this.memstoreSizeMB.pushMetric(this.metricsRecord);
-    this.requestsCount.pushMetric(this.metricsRecord);
+    this.readRequestsCount.pushMetric(this.metricsRecord);
+    this.writeRequestsCount.pushMetric(this.metricsRecord);
     this.regions.pushMetric(this.metricsRecord);
     this.requests.pushMetric(this.metricsRecord);
     this.compactionQueueSize.pushMetric(this.metricsRecord);
@@ -351,8 +357,10 @@
       Integer.valueOf(this.storefileIndexSizeMB.get()));
     sb = Strings.appendKeyValue(sb, "memstoreSize",
       Integer.valueOf(this.memstoreSizeMB.get()));
-    sb = Strings.appendKeyValue(sb, "requestsCount",
-      Long.valueOf(this.requestsCount.get()));
+    sb = Strings.appendKeyValue(sb, "readRequestsCount",
+      Long.valueOf(this.readRequestsCount.get()));
+    sb = Strings.appendKeyValue(sb, "writeRequestsCount",
+      Long.valueOf(this.writeRequestsCount.get()));
     sb = Strings.appendKeyValue(sb, "compactionQueueSize",
       Integer.valueOf(this.compactionQueueSize.get()));
     sb = Strings.appendKeyValue(sb, "flushQueueSize",
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1081925)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -175,7 +175,8 @@
 
   final AtomicLong memstoreSize = new AtomicLong(0);
 
-  final Counter requestsCount = new Counter();
+  final Counter readRequestsCount = new Counter();
+  final Counter writeRequestsCount = new Counter();
 
   /**
    * The directory for the table this region is part of.
@@ -460,9 +461,19 @@
 
   /** @return requestsCount for this region */
   public long getRequestsCount() {
-    return this.requestsCount.get();
+    return this.readRequestsCount.get() + this.writeRequestsCount.get();
   }
 
+  /** @return readRequestsCount for this region */
+  public long getReadRequestsCount() {
+    return this.readRequestsCount.get();
+  }
+
+  /** @return writeRequestsCount for this region */
+  public long getWriteRequestsCount() {
+    return this.writeRequestsCount.get();
+  }
+
   /** @return true if region is closed */
   public boolean isClosed() {
     return this.closed.get();
@@ -1129,6 +1140,7 @@
     // closest key is across all column families, since the data may be sparse
     checkRow(row);
     startRegionOperation();
+    this.readRequestsCount.increment();
     try {
       Store store = getStore(family);
       KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP);
@@ -1165,6 +1177,7 @@
   protected InternalScanner getScanner(Scan scan,
       List additionalScanners) throws IOException {
     startRegionOperation();
+    this.readRequestsCount.increment();
     try {
       // Verify families are all valid
       if(scan.hasFamilies()) {
@@ -1233,6 +1246,7 @@
     checkResources();
     Integer lid = null;
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       byte [] row = delete.getRow();
       // If we did not pass an existing row lock, obtain a new one
@@ -1386,6 +1400,7 @@
     // will be extremely rare; we'll deal with it when it happens.
     checkResources();
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       // We obtain a per-row lock, so other clients will block while one client
       // performs an update. The read lock is released by the client calling
@@ -1458,6 +1473,7 @@
     long newSize;
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       long addedSize = doMiniBatchPut(batchOp);
       newSize = memstoreSize.addAndGet(addedSize);
@@ -1657,6 +1673,7 @@
     }
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       RowLock lock = isPut ? ((Put)w).getRowLock() : ((Delete)w).getRowLock();
       Get get = new Get(row, lock);
@@ -2205,6 +2222,7 @@
    */
   public Integer obtainRowLock(final byte [] row) throws IOException {
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       return internalObtainRowLock(row, true);
     } finally {
@@ -2213,21 +2231,6 @@
   }
 
   /**
-   * Tries to obtain a row lock on the given row, but does not block if the
-   * row lock is not available. If the lock is not available, returns false.
-   * Otherwise behaves the same as the above method.
-   * @see HRegion#obtainRowLock(byte[])
-   */
-  public Integer tryObtainRowLock(final byte[] row) throws IOException {
-    startRegionOperation();
-    try {
-      return internalObtainRowLock(row, false);
-    } finally {
-      closeRegionOperation();
-    }
-  }
-
-  /**
   * Obtains or tries to obtain the given row lock.
   * @param waitForLock if true, will block until the lock is available.
   * Otherwise, just tries to obtain the lock and returns
@@ -2338,6 +2341,7 @@
   public void bulkLoadHFile(String hfilePath, byte[] familyName)
       throws IOException {
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       Store store = getStore(familyName);
       if (store == null) {
@@ -2451,6 +2455,7 @@
             "or a lengthy garbage collection");
       }
       startRegionOperation();
+      readRequestsCount.increment();
       try {
         // This could be a new thread from the last time we called next().
@@ -2970,7 +2975,8 @@
       listPaths(fs, newRegionDir);
     }
     HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf, newRegionInfo, null);
-    dstRegion.requestsCount.set(a.requestsCount.get() + b.requestsCount.get());
+    dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
+    dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
     dstRegion.initialize();
     dstRegion.compactStores();
     if (LOG.isDebugEnabled()) {
@@ -3216,6 +3222,7 @@
 
     // Lock row
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       Integer lid = getLock(lockid, row, true);
       this.updatesLock.readLock().lock();
@@ -3306,6 +3313,7 @@
     // Lock row
     long result = amount;
     startRegionOperation();
+    this.writeRequestsCount.increment();
     try {
       Integer lid = obtainRowLock(row);
       this.updatesLock.readLock().lock();
@@ -3380,7 +3388,7 @@
   public static final long FIXED_OVERHEAD = ClassSize.align(
       (4 * Bytes.SIZEOF_LONG) + ClassSize.ARRAY +
-      (25 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT);
+      (26 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT);
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
       (ClassSize.OBJECT * 2) + (2 * ClassSize.ATOMIC_BOOLEAN) +
@@ -3559,7 +3567,6 @@
         } finally {
           scanner.close();
         }
-        // System.out.println(region.getClosestRowBefore(Bytes.toBytes("GeneratedCSVContent2,E3652782193BC8D66A0BA1629D0FAAAB,9993372036854775807")));
       }
     } finally {
       region.close();
@@ -3643,7 +3650,6 @@
       throw new NotServingRegionException(regionInfo.getRegionNameAsString() +
         " is closed");
     }
-    this.requestsCount.increment();
   }
 
   /**