Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java	(revision 1419943)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java	(working copy)
@@ -99,6 +99,18 @@
     }
 
     @Override
+    public boolean nextRaw(List<KeyValue> result, int limit, String metric)
+        throws IOException {
+      return delegate.nextRaw(result, limit, metric);
+    }
+
+    @Override
+    public boolean nextRaw(List<KeyValue> result)
+        throws IOException {
+      return delegate.nextRaw(result);
+    }
+
+    @Override
     public void close() throws IOException {
       delegate.close();
     }
@@ -123,6 +135,10 @@
       return delegate.getMaxResultSize();
     }
 
+    @Override
+    public long getReadPt() {
+      return delegate.getReadPt();
+    }
   }
 
   public static class CoprocessorImpl extends BaseRegionObserver {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 1419943)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -3467,6 +3467,10 @@
       return maxResultSize;
     }
 
+    @Override
+    public long getReadPt() {
+      return this.readPt;
+    }
     /**
      * Reset both the filter and the old filter.
      */
@@ -3477,7 +3481,7 @@
     }
 
     @Override
-    public synchronized boolean next(List<KeyValue> outResults, int limit)
+    public boolean next(List<KeyValue> outResults, int limit)
         throws IOException {
       return next(outResults, limit, null);
     }
@@ -3497,30 +3501,42 @@
         // This could be a new thread from the last time we called next().
         MultiVersionConsistencyControl.setThreadReadPoint(this.readPt);
 
-        results.clear();
-
-        boolean returnResult = nextInternal(limit, metric);
-
-        outResults.addAll(results);
-        resetFilters();
-        if (isFilterDone()) {
-          return false;
-        }
-        return returnResult;
+        return nextRaw(outResults, limit, metric);
       } finally {
         closeRegionOperation();
       }
     }
 
     @Override
-    public synchronized boolean next(List<KeyValue> outResults)
+    public boolean nextRaw(List<KeyValue> outResults)
         throws IOException {
+      return nextRaw(outResults, batch, null);
+    }
+
+    @Override
+    public boolean nextRaw(List<KeyValue> outResults, int limit,
+        String metric) throws IOException {
+      results.clear();
+
+      boolean returnResult = nextInternal(limit, metric);
+
+      outResults.addAll(results);
+      resetFilters();
+      if (isFilterDone()) {
+        return false;
+      }
+      return returnResult;
+    }
+
+    @Override
+    public boolean next(List<KeyValue> outResults)
+        throws IOException {
       // apply the batching limit by default
       return next(outResults, batch, null);
     }
 
     @Override
-    public synchronized boolean next(List<KeyValue> outResults, String metric)
+    public boolean next(List<KeyValue> outResults, String metric)
         throws IOException {
       // apply the batching limit by default
       return next(outResults, batch, metric);
@@ -5274,7 +5290,7 @@
    * @throws RegionTooBusyException if failed to get the lock in time
    * @throws InterruptedIOException if interrupted while waiting for a lock
    */
-  private void startRegionOperation()
+  public void startRegionOperation()
       throws NotServingRegionException, RegionTooBusyException, InterruptedIOException {
     if (this.closing.get()) {
       throw new NotServingRegionException(regionInfo.getRegionNameAsString() +
@@ -5292,7 +5308,7 @@
    * Closes the lock. This needs to be called in the finally block corresponding
    * to the try block of #startRegionOperation
    */
-  private void closeRegionOperation() {
+  public void closeRegionOperation() {
     lock.readLock().unlock();
   }
 
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java	(revision 1419943)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java	(working copy)
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.io.StringWriter;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
@@ -216,7 +215,6 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException;
 import org.cliffc.high_scale_lib.Counter;
-import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.base.Function;
 import com.google.protobuf.ByteString;
@@ -2922,20 +2920,30 @@
             maxResultSize = maxScannerResultSize;
           }
           List<KeyValue> values = new ArrayList<KeyValue>();
-          for (int i = 0; i < rows
-              && currentScanResultSize < maxResultSize; i++) {
-            // Collect values to be returned here
-            boolean moreRows = scanner.next(values);
-            if (!values.isEmpty()) {
-              for (KeyValue kv : values) {
-                currentScanResultSize += kv.heapSize();
+          MultiVersionConsistencyControl.setThreadReadPoint(scanner.getReadPt());
+          region.startRegionOperation();
+          try {
+            int i = 0;
+            synchronized(scanner) {
+              for (; i < rows
+                  && currentScanResultSize < maxResultSize; i++) {
+                // Collect values to be returned here
+                boolean moreRows = scanner.nextRaw(values);
+                if (!values.isEmpty()) {
+                  for (KeyValue kv : values) {
+                    currentScanResultSize += kv.heapSize();
+                  }
+                  results.add(new Result(values));
+                }
+                if (!moreRows) {
+                  break;
+                }
+                values.clear();
               }
-              results.add(new Result(values));
             }
-            if (!moreRows) {
-              break;
-            }
-            values.clear();
+            region.readRequestsCount.add(i);
+          } finally {
+            region.closeRegionOperation();
           }
 
           // coprocessor postNext hook
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java	(revision 1419943)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java	(working copy)
@@ -19,9 +19,11 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 
 /**
@@ -56,4 +58,33 @@
    * @return The preferred max buffersize. See {@link Scan#setMaxResultSize(long)}
    */
   public long getMaxResultSize();
+
+  /**
+   * @return The Scanner's readPt
+   */
+  public long getReadPt();
+
+  /**
+   * Grab the next row's worth of values with the default limit on the number of values
+   * to return.
+   * This is a special internal method to be called from coprocessor hooks to avoid expensive setup.
+   * Caller must synchronize on the scanner object.
+   * @param result return output array
+   * @return true if more rows exist after this one, false if scanner is done
+   * @throws IOException e
+   */
+  public boolean nextRaw(List<KeyValue> result) throws IOException;
+
+  /**
+   * Grab the next row's worth of values with a limit on the number of values
+   * to return.
+   * This is a special internal method to be called from coprocessor hooks to avoid expensive setup.
+   * Caller must synchronize on the scanner object.
+   * @param result return output array
+   * @param limit limit on row count to get
+   * @param metric the metric name
+   * @return true if more rows exist after this one, false if scanner is done
+   * @throws IOException e
+   */
+  public boolean nextRaw(List<KeyValue> result, int limit, String metric) throws IOException;
 }