From 65d40d9fb84aca30c3a0a50ba4468f647214f5a5 Mon Sep 17 00:00:00 2001 From: Jonathan Lawlor Date: Fri, 1 May 2015 15:40:23 -0700 Subject: [PATCH] HBASE-13541 Deprecate Scan caching in 2.0.0 --- .../org/apache/hadoop/hbase/MetaTableAccessor.java | 14 ++--- .../hadoop/hbase/client/AbstractClientScanner.java | 9 ++-- .../apache/hadoop/hbase/client/ClientScanner.java | 25 +++++---- .../hbase/client/ClientSmallReversedScanner.java | 2 +- .../hadoop/hbase/client/ClientSmallScanner.java | 2 +- .../hbase/client/ConnectionImplementation.java | 47 ++++++++-------- .../org/apache/hadoop/hbase/client/HTable.java | 8 +-- .../hadoop/hbase/client/ReversedClientScanner.java | 2 +- .../java/org/apache/hadoop/hbase/client/Scan.java | 62 ++++++++++++++++------ .../hadoop/hbase/client/TableConfiguration.java | 16 +++--- .../client/coprocessor/AggregationClient.java | 10 ++-- .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 8 +-- .../hadoop/hbase/client/TestClientScanner.java | 22 ++++---- .../client/TestClientSmallReversedScanner.java | 4 +- .../hbase/client/TestClientSmallScanner.java | 4 +- .../java/org/apache/hadoop/hbase/HConstants.java | 10 +++- .../hbase/mapreduce/IntegrationTestImportTsv.java | 4 +- .../hbase/test/IntegrationTestBigLinkedList.java | 2 +- ...IntegrationTestBigLinkedListWithVisibility.java | 2 +- .../hadoop/hbase/rest/ScannerResultGenerator.java | 8 +-- .../apache/hadoop/hbase/rest/TableResource.java | 2 +- .../hadoop/hbase/rest/model/ScannerModel.java | 2 +- .../hadoop/hbase/mapreduce/TableInputFormat.java | 18 +++++-- .../mapreduce/replication/VerifyReplication.java | 11 ++-- .../java/org/apache/hadoop/hbase/tool/Canary.java | 4 +- .../apache/hadoop/hbase/PerformanceEvaluation.java | 41 +++++++++----- .../hadoop/hbase/ScanPerformanceEvaluation.java | 16 ++++-- .../hbase/TestPartialResultsFromClientSide.java | 15 +++--- .../hbase/client/TestClientScannerRPCTimeout.java | 4 +- .../hadoop/hbase/client/TestFromClientSide.java | 18 +++---- .../hadoop/hbase/client/TestReplicasClient.java | 8 +-- .../hadoop/hbase/client/TestScannerTimeout.java | 8 +-- .../hbase/client/TestScannersFromClientSide.java | 2 +- .../hadoop/hbase/client/TestSizeFailures.java | 4 +- .../regionserver/TestRegionServerMetrics.java | 40 ++++++++++---- .../regionserver/TestScannerHeartbeatMessages.java | 4 +- .../apache/hadoop/hbase/regionserver/TestTags.java | 7 ++- .../hadoop/hbase/thrift/ThriftServerRunner.java | 2 +- .../hadoop/hbase/thrift2/ThriftUtilities.java | 2 +- 39 files changed, 281 insertions(+), 188 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index ea29e4f41c954773bfa65063ad3ab4244f201eed..471ddc4b61d0d4faa83bc427a04b0b2f5dae34e2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; @@ -34,8 +32,9 @@ import java.util.TreeMap; import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.ServiceException; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; @@ -66,6 +65,9 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ServiceException; + /** * Read/write operations on region and assignment information store in * hbase:meta. @@ -560,7 +562,7 @@ public class MetaTableAccessor { scan.setSmall(true); } int rows = Math.min(rowUpperLimit, scannerCaching); - scan.setCaching(rows); + scan.setRowLimit(rows); return scan; } /** @@ -767,7 +769,7 @@ public class MetaTableAccessor { + " starting at row=" + Bytes.toStringBinary(startRow) + " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit - + " with caching=" + scan.getCaching()); + + " with rowLimit=" + scan.getRowLimit()); } int currentRow = 0; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java index dc325a3cfe1cf5f50937e774383e9e9fc783f60e..f44b104e9c72317b5d57079d66c45ab682de4b5c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -54,12 +54,11 @@ public abstract class AbstractClientScanner implements ResultScanner { } /** - * Get nbRows rows. - * How many RPCs are made is determined by the {@link Scan#setCaching(int)} - * setting (or hbase.client.scanner.caching in hbase-site.xml). + * Get nbRows rows. How many RPCs are made is determined by the + * {@link Scan#setRowLimit(int)} and {@link Scan#setMaxResultSize(long)} limits * @param nbRows number of rows to return - * @return Between zero and nbRows RowResults. Scan is done - * if returned array is of zero-length (We never return null). + * @return Between zero and nbRows RowResults. Scan is done if returned array is + * of zero-length (We never return null). * @throws IOException */ @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 01ce47f9d08f55899a1e0b562242c3e07a081381..182433f8781fb34d1bed4857719ecb24284538f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -77,7 +77,7 @@ public class ClientScanner extends AbstractClientScanner { * via the methods {@link #addToPartialResults(Result)} and {@link #clearPartialResults()} */ protected byte[] partialResultsRow = null; - protected final int caching; + protected final int rowLimit; protected long lastNext; // Keep lastResult returned successfully in case we have to reset scanner. protected Result lastResult = null; @@ -138,13 +138,16 @@ public class ClientScanner extends AbstractClientScanner { // check if application wants to collect scan metrics initScanMetrics(scan); - // Use the caching from the Scan. If not set, use the default cache setting for this table. - if (this.scan.getCaching() > 0) { - this.caching = this.scan.getCaching(); + // Use the row limit from the Scan. If not set, use the default row limit setting for this + // table. 
+ if (this.scan.getRowLimit() > 0) { + this.rowLimit = this.scan.getRowLimit(); } else { - this.caching = conf.getInt( + this.rowLimit = + HBaseConfiguration.getInt(conf, + HConstants.HBASE_CLIENT_SCANNER_ROW_LIMIT, HConstants.HBASE_CLIENT_SCANNER_CACHING, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_ROW_LIMIT); } this.caller = rpcFactory. newCaller(); @@ -156,7 +159,7 @@ public class ClientScanner extends AbstractClientScanner { protected void initializeScannerInConstruction() throws IOException{ // initialize the scanner - nextScanner(this.caching, false); + nextScanner(this.rowLimit, false); } protected ClusterConnection getConnection() { @@ -203,7 +206,7 @@ public class ClientScanner extends AbstractClientScanner { } protected int getCaching() { - return caching; + return rowLimit; } protected long getTimestamp() { @@ -329,7 +332,7 @@ public class ClientScanner extends AbstractClientScanner { s.setCaching(nbRows); ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(tableName, getConnection(), s, pool, primaryOperationTimeout, scan, - retries, scannerTimeout, caching, conf, caller); + retries, scannerTimeout, rowLimit, conf, caller); return sr; } @@ -383,11 +386,11 @@ public class ClientScanner extends AbstractClientScanner { protected void loadCache() throws IOException { Result[] values = null; long remainingResultSize = maxScannerResultSize; - int countdown = this.caching; + int countdown = this.rowLimit; // We need to reset it if it's a new callable that was created // with a countdown in nextScanner - callable.setCaching(this.caching); + callable.setCaching(this.rowLimit); // This flag is set when we want to skip the result returned. We do // this when we reset scanner because it split under us. 
boolean retryAfterOutOfOrderException = true; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java index 28502dc0244d9d42c78404b4dad117f9bb633072..1054629d1c2b463d8904b99e1ff21bd2cd3434f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java @@ -198,7 +198,7 @@ public class ClientSmallReversedScanner extends ReversedClientScanner { protected void loadCache() throws IOException { Result[] values = null; long remainingResultSize = maxScannerResultSize; - int countdown = this.caching; + int countdown = this.rowLimit; boolean currentRegionDone = false; // Values == null means server-side filter has determined we must STOP while (remainingResultSize > 0 && countdown > 0 diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java index 8e4a0d8d6e56ab240efc700fc571e23bcc04d45e..7c1e5cd58a79c21cacaba22f84e1adbd4a3ed9ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java @@ -244,7 +244,7 @@ public class ClientSmallScanner extends ClientScanner { protected void loadCache() throws IOException { Result[] values = null; long remainingResultSize = maxScannerResultSize; - int countdown = this.caching; + int countdown = this.rowLimit; boolean currentRegionDone = false; // Values == null means server-side filter has determined we must STOP while (remainingResultSize > 0 && countdown > 0 diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 35ff34fecb44341e7f8cb33b633895b2a5ce1097..77b9dd894e8023ade8eb4bcd673bd71e070547b6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -18,10 +18,25 @@ */ package org.apache.hadoop.hbase.client; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import java.io.Closeable; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.lang.reflect.UndeclaredThrowableException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.annotation.Nullable; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -69,24 +84,10 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; -import javax.annotation.Nullable; 
-import java.io.Closeable; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.lang.reflect.UndeclaredThrowableException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; /** * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces. @@ -811,7 +812,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { s.setReversed(true); s.setStartRow(metaKey); s.setSmall(true); - s.setCaching(1); + s.setRowLimit(1); if (this.useMetaReplicas) { s.setConsistency(Consistency.TIMELINE); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index e0c29776b3a92b5e4979cfcff398a22038cfa458..ed1da4bf70589b540053b4b9984cf11314ad8c03 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -115,7 +115,7 @@ public class HTable implements HTableInterface { protected BufferedMutatorImpl mutator; private boolean autoFlush = true; private boolean closed = false; - protected int scannerCaching; + protected int scannerRowLimit; protected long scannerMaxResultSize; private ExecutorService pool; // For Multi & Scan private int operationTimeout; @@ -217,7 +217,7 @@ public class HTable implements HTableInterface { this.operationTimeout = tableName.isSystemTable() ? 
tableConfiguration.getMetaOperationTimeout() : tableConfiguration.getOperationTimeout(); - this.scannerCaching = tableConfiguration.getScannerCaching(); + this.scannerRowLimit = tableConfiguration.getScannerRowLimit(); this.scannerMaxResultSize = tableConfiguration.getScannerMaxResultSize(); if (this.rpcCallerFactory == null) { this.rpcCallerFactory = connection.getNewRpcRetryingCallerFactory(configuration); @@ -613,8 +613,8 @@ public class HTable implements HTableInterface { if (scan.getBatch() > 0 && scan.isSmall()) { throw new IllegalArgumentException("Small scan should not be used with batching"); } - if (scan.getCaching() <= 0) { - scan.setCaching(scannerCaching); + if (scan.getRowLimit() <= 0) { + scan.setRowLimit(scannerRowLimit); } if (scan.getMaxResultSize() <= 0) { scan.setMaxResultSize(scannerMaxResultSize); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java index ef4b89d6b46bc7d1ea7c3603e8393979df118325..49a4aa09fc8a3d5cff53d621e4d9a5e9ce271d90 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java @@ -137,7 +137,7 @@ public class ReversedClientScanner extends ClientScanner { s.setCaching(nbRows); ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(getTable(), getConnection(), s, pool, - primaryOperationTimeout, scan, getRetries(), getScannerTimeout(), caching, getConf(), + primaryOperationTimeout, scan, getRetries(), getScannerTimeout(), rowLimit, getConf(), caller); return sr; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index 3b6194f99b6d88045f431a7f4cc1924bf5610449..af17a312d88617b820bdd6c38d67648fa9a610f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -53,9 +53,9 @@ import org.apache.hadoop.hbase.util.Bytes; *

* To scan everything for each row, instantiate a Scan object. *

- * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. - * If caching is NOT set, we will use the caching value of the hosting {@link Table}. - * In addition to row caching, it is possible to specify a + * To modify the scanner row limit for just this scan, use {@link #setRowLimit}. + * If the row limit is NOT set, we will use the row limit value of the hosting {@link Table}. + * In addition to a row limit, it is possible to specify a * maximum result size, using {@link #setMaxResultSize(long)}. When both are used, * single server requests are limited by either number of rows or maximum result size, whichever * limit comes first. @@ -134,10 +134,10 @@ public class Scan extends Query { // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName)) static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name"; - /* - * -1 means no caching + /** + * -1 means use the default value {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_ROW_LIMIT}. */ - private int caching = -1; + private int rowLimit = -1; private long maxResultSize = -1; private boolean cacheBlocks = true; private boolean reversed = false; @@ -212,7 +212,7 @@ public class Scan extends Query { batch = scan.getBatch(); storeLimit = scan.getMaxResultsPerColumnFamily(); storeOffset = scan.getRowOffsetPerColumnFamily(); - caching = scan.getCaching(); + rowLimit = scan.getRowLimit(); maxResultSize = scan.getMaxResultSize(); cacheBlocks = scan.getCacheBlocks(); getScan = scan.isGetScan(); @@ -488,14 +488,35 @@ public class Scan extends Query { } /** - * Set the number of rows for caching that will be passed to scanners. - * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will - * apply. - * Higher caching values will enable faster scanners but will use more memory. + * Set the number of rows for caching that will be passed to scanners. If not set, the + * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher + * caching values will enable faster scanners but will use more memory. * @param caching the number of rows for caching + * @deprecated since 2.0.0 use {@link #setRowLimit(int)} instead */ public Scan setCaching(int caching) { - this.caching = caching; + return setRowLimit(caching); + } + + /** + * Set the row limit that will be used by the scanner. If not set, the Configuration setting + * {@link HConstants#HBASE_CLIENT_SCANNER_ROW_LIMIT} will apply. The row limit specifies the + * maximum number of {@link Result}s that can be transferred between the client and the server + * per Scan RPC. + *

+ * In general, the row limit should not be changed from its default value + * {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_ROW_LIMIT} unless it makes sense to limit the + * Scan RPCs on the basis of the number of rows rather than size (see + * {@link #setMaxResultSize(long)}). + *

+ * For example, this value could be configured in the case that the user knows the number of rows + * that will be required from the scanner beforehand. Setting the row limit in this scenario + * would allow the server to complete the Scan RPC faster + * @param rowLimit the maximum number of {@link Result}s to transfer between the client and the + * server per Scan RPC + */ + public Scan setRowLimit(int rowLimit) { + this.rowLimit = rowLimit; return this; } @@ -615,9 +636,18 @@ public class Scan extends Query { /** * @return caching the number of rows fetched when calling next on a scanner + * @deprecated since 2.0.0 use {@link #getRowLimit()} instead */ public int getCaching() { - return this.caching; + return getRowLimit(); + } + + /** + * @return row limit, the maximum number of {@link Result}s that the server can return per Scan + * RPC + */ + public int getRowLimit() { + return this.rowLimit; } /** @@ -787,7 +817,9 @@ public class Scan extends Query { map.put("stopRow", Bytes.toStringBinary(this.stopRow)); map.put("maxVersions", this.maxVersions); map.put("batch", this.batch); - map.put("caching", this.caching); + // TODO: remove caching from the map when the caching API is removed. + map.put("caching", this.rowLimit); + map.put("rowLimit", this.rowLimit); map.put("maxResultSize", this.maxResultSize); map.put("cacheBlocks", this.cacheBlocks); map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand); @@ -941,7 +973,7 @@ public class Scan extends Query { Scan scan = new Scan(row); scan.setSmall(true); scan.setReversed(true); - scan.setCaching(1); + scan.setRowLimit(1); return scan; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java index 901e86d3043507f7d6f6a9893792ae48ffcaed30..374c279645e94b0ac62605f1438f7af20179091a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java @@ -12,6 +12,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -36,7 +37,7 @@ public class TableConfiguration { private final long writeBufferSize; private final int metaOperationTimeout; private final int operationTimeout; - private final int scannerCaching; + private final int scannerRowLimit; private final long scannerMaxResultSize; private final int primaryCallTimeoutMicroSecond; private final int replicaCallTimeoutMicroSecondScan; @@ -57,8 +58,11 @@ public class TableConfiguration { this.operationTimeout = conf.getInt( HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.scannerCaching = conf.getInt( - HConstants.HBASE_CLIENT_SCANNER_CACHING, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + this.scannerRowLimit = + HBaseConfiguration.getInt(conf, + HConstants.HBASE_CLIENT_SCANNER_ROW_LIMIT, + HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_ROW_LIMIT); this.scannerMaxResultSize = conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, @@ -86,7 +90,7 @@ public class TableConfiguration { this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT; this.metaOperationTimeout = HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT; this.operationTimeout = 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT; - this.scannerCaching = HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING; + this.scannerRowLimit = HConstants.DEFAULT_HBASE_CLIENT_SCANNER_ROW_LIMIT; this.scannerMaxResultSize = HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE; this.primaryCallTimeoutMicroSecond = 10000; this.replicaCallTimeoutMicroSecondScan = 1000000; @@ -106,8 +110,8 @@ public class TableConfiguration { return operationTimeout; } - public int getScannerCaching() { - return scannerCaching; + public int getScannerRowLimit() { + return scannerRowLimit; } public int getPrimaryCallTimeoutMicroSecond() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 5421e57a43f7a58000a68f64449cad6776354e94..6e4556548552a4fdc973389122fb1ae43590f85f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -752,11 +752,11 @@ public class AggregationClient implements Closeable { if (startRow != null) scan2.setStartRow(startRow); ResultScanner scanner = null; try { - int cacheSize = scan2.getCaching(); - if (!scan2.getCacheBlocks() || scan2.getCaching() < 2) { + int rowLimit = scan2.getRowLimit(); + if (!scan2.getCacheBlocks() || scan2.getRowLimit() < 2) { scan2.setCacheBlocks(true); - cacheSize = 5; - scan2.setCaching(cacheSize); + rowLimit = 5; + scan2.setRowLimit(rowLimit); } scanner = table.getScanner(scan2); Result[] results = null; @@ -765,7 +765,7 @@ public class AggregationClient implements Closeable { byte[] weightQualifier = weighted ? 
quals.pollLast() : qualifier; R value = null; do { - results = scanner.next(cacheSize); + results = scanner.next(rowLimit); if (results != null && results.length > 0) { for (int i = 0; i < results.length; i++) { Result r = results[i]; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 8b5b2d766ee92207f5aa47a13ebdf3e30840f071..4750b5d41869681afb86158f47c295d7c277a914 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -122,12 +122,12 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.WALProtos; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; -import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.quotas.QuotaScope; import org.apache.hadoop.hbase.quotas.QuotaType; @@ -929,8 +929,8 @@ public final class ProtobufUtil { if (scan.getConsistency() == Consistency.TIMELINE) { scanBuilder.setConsistency(toConsistency(scan.getConsistency())); } - if (scan.getCaching() > 0) { - scanBuilder.setCaching(scan.getCaching()); + if (scan.getRowLimit() > 0) { + scanBuilder.setCaching(scan.getRowLimit()); } return scanBuilder.build(); } @@ -1015,7 +1015,7 @@ public final class ProtobufUtil { scan.setConsistency(toConsistency(proto.getConsistency())); } if (proto.hasCaching()) { - scan.setCaching(proto.getCaching()); + scan.setRowLimit(proto.getCaching()); } return scan; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java index a91def38b91e272bdf636cbefaad4835b72ee35d..b55567918ca27d5e7dde0cdef1d1a6337b732237 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java @@ -114,7 +114,7 @@ public class TestClientScanner { s.setCaching(nbRows); ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(getTable(), getConnection(), s, pool, primaryOperationTimeout, scan, - getRetries(), scannerTimeout, caching, conf, caller); + getRetries(), scannerTimeout, rowLimit, conf, caller); return sr; } @@ -156,8 +156,8 @@ public class TestClientScanner { } }); - // Set a much larger cache and buffer size than we'll provide - scan.setCaching(100); + // Set a much larger row limit and buffer size than we'll provide + scan.setRowLimit(100); scan.setMaxResultSize(1000*1000); try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), @@ -219,8 +219,8 @@ 
public class TestClientScanner { Mockito.when(rpcFactory. newCaller()).thenReturn(caller); - // Set a much larger cache - scan.setCaching(100); + // Set a much larger row limit + scan.setRowLimit(100); // The single key-value will exit the loop scan.setMaxResultSize(1); @@ -250,7 +250,7 @@ public class TestClientScanner { @Test @SuppressWarnings("unchecked") - public void testCacheLimit() throws IOException { + public void testRowLimit() throws IOException { KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum), kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum), kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, @@ -287,7 +287,7 @@ public class TestClientScanner { Mockito.when(rpcFactory. newCaller()).thenReturn(caller); // Set a small cache - scan.setCaching(1); + scan.setRowLimit(1); // Set a very large size scan.setMaxResultSize(1000*1000); @@ -367,8 +367,8 @@ public class TestClientScanner { Mockito.when(rpcFactory. newCaller()).thenReturn(caller); - // Set a much larger cache and buffer size than we'll provide - scan.setCaching(100); + // Set a much larger row limit and buffer size than we'll provide + scan.setRowLimit(100); scan.setMaxResultSize(1000*1000); try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), @@ -443,8 +443,8 @@ public class TestClientScanner { } }); - // Set a much larger cache and buffer size than we'll provide - scan.setCaching(100); + // Set a much larger row limit and buffer size than we'll provide + scan.setRowLimit(100); scan.setMaxResultSize(1000*1000); try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallReversedScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallReversedScanner.java index 4611d08dfe141057b0a1c8514b25057f2aaa86bc..5d78a4d830b8677732d3f794295d7554799ac455 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallReversedScanner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallReversedScanner.java @@ -202,7 +202,7 @@ public class TestClientSmallReversedScanner { .mock(ScannerCallableWithReplicas.class); // While the server returns 2 records per batch, we expect more records. - scan.setCaching(2); + scan.setRowLimit(2); SmallScannerCallableFactory factory = getFactory(callableWithReplicas); @@ -278,7 +278,7 @@ public class TestClientSmallReversedScanner { .mock(ScannerCallableWithReplicas.class); // While the server return 2 records per RPC, we expect there to be more records. - scan.setCaching(2); + scan.setRowLimit(2); SmallScannerCallableFactory factory = getFactory(callableWithReplicas); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallScanner.java index 90bf4bbd3abe82ef8671f5725cfcd55e3415bf2c..c2e41757d34770d6835896ef725b012157588c2b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallScanner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallScanner.java @@ -200,7 +200,7 @@ public class TestClientSmallScanner { .mock(ScannerCallableWithReplicas.class); // While the server returns 2 records per batch, we expect more records. 
- scan.setCaching(2); + scan.setRowLimit(2); SmallScannerCallableFactory factory = getFactory(callableWithReplicas); try (ClientSmallScanner css = new ClientSmallScanner(conf, scan, TableName.valueOf("table"), @@ -271,7 +271,7 @@ public class TestClientSmallScanner { .mock(ScannerCallableWithReplicas.class); // While the server return 2 records per RPC, we expect there to be more records. - scan.setCaching(2); + scan.setRowLimit(2); SmallScannerCallableFactory factory = getFactory(callableWithReplicas); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 9bf3e515da478faa6351da319915dd64363bb834..4e31ea996a93836df6c86a85cd9dfab80ec0d047 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -713,13 +713,19 @@ public final class HConstants { /** * Parameter name to set the default scanner caching for all clients. + * @deprecated since 2.0.0 use {@link #HBASE_CLIENT_SCANNER_ROW_LIMIT} instead */ public static final String HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching"; /** - * Default value for {@link #HBASE_CLIENT_SCANNER_CACHING} + * Parameter name to set the default scanner row limit for all clients. */ - public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE; + public static final String HBASE_CLIENT_SCANNER_ROW_LIMIT = "hbase.client.scanner.row.limit"; + + /** + * Default value for {@link #HBASE_CLIENT_SCANNER_ROW_LIMIT} + */ + public static final int DEFAULT_HBASE_CLIENT_SCANNER_ROW_LIMIT = Integer.MAX_VALUE; /** * Parameter name for number of rows that will be fetched when calling next on diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java index 4c27ec81a065b63c7db2c28a5efa00518ca1dd45..79ccb3b7329e2a33d8365302327b494541cae1da 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java @@ -38,14 +38,13 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.IntegrationTestingUtility; -import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner; import org.apache.hadoop.util.GenericOptionsParser; @@ -141,7 +140,6 @@ public class IntegrationTestImportTsv implements Configurable, Tool { Table table = null; Scan scan = new Scan() {{ setCacheBlocks(false); - setCaching(1000); }}; try { table = util.getConnection().getTable(tableName); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index 
1fe530686f48687564792e108c272107d133c332..389360516c314261ebe76d9500de8f9fc9c9f87e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -1068,7 +1068,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { Scan scan = new Scan(); scan.addColumn(FAMILY_NAME, COLUMN_PREV); - scan.setCaching(10000); + scan.setRowLimit(10000); scan.setCacheBlocks(false); if (isMultiUnevenColumnFamilies()) { scan.addColumn(BIG_FAMILY_NAME, BIG_FAMILY_NAME); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java index a49d9cad2920c11c97eb9e92bd57ddfc1ab25085..cc91453d592b1c809933b1555b82ccc4b3b96a58 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java @@ -426,7 +426,7 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB Scan scan = new Scan(); scan.addColumn(FAMILY_NAME, COLUMN_PREV); - scan.setCaching(10000); + scan.setRowLimit(10000); scan.setCacheBlocks(false); String[] split = labels.split(COMMA); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java index 9a8ee001ec9a5216db4eef3a303c74cccdb7763b..af663ae79df91747c9cdf8beabee0c5fedb2db46 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -24,10 +24,10 @@ import java.util.Iterator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -65,7 +65,7 @@ public class ScannerResultGenerator extends ResultGenerator { } public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final int caching, final boolean cacheBlocks) + final Filter filter, final int rowLimit, final boolean cacheBlocks) throws IllegalArgumentException, IOException { Table table = RESTServlet.getInstance().getTable(tableName); try { @@ -93,8 +93,8 @@ public class ScannerResultGenerator extends ResultGenerator { if (filter != null) { scan.setFilter(filter); } - if (caching > 0 ) { - scan.setCaching(caching); + if (rowLimit > 0 ) { + scan.setRowLimit(rowLimit); } scan.setCacheBlocks(cacheBlocks); if (rowspec.hasLabels()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index 556425ff69b053a29bf0505760c765f7eae41aa9..b4a380e2878ab7a460061e5c926aca1ec683a0cb 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -185,7 +185,7 @@ public 
class TableResource extends ResourceBase { tableScan.setFilter(filter); } int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); - tableScan.setCaching(fetchSize); + tableScan.setRowLimit(fetchSize); return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit); } catch (Exception exp) { servlet.getMetrics().incrementFailedScanRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 25a6de32e562aae4efd921601d1a8f439eaed9b7..6520a91b2424e3803cc3b71f67364c73fe4d2d92 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -523,7 +523,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { } model.setStartTime(scan.getTimeRange().getMin()); model.setEndTime(scan.getTimeRange().getMax()); - int caching = scan.getCaching(); + int caching = scan.getRowLimit(); if (caching > 0) { model.setCaching(caching); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index bc2537b7517fa40cab37e5f4b8686074febf0ffc..a021bad60d43cb070e48a506a7193fb786ffac95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -35,10 +35,10 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.util.StringUtils; /** @@ -81,7 +81,12 @@ implements Configurable { public static final String SCAN_MAXVERSIONS = "hbase.mapreduce.scan.maxversions"; /** Set to false to disable server-side caching of blocks for this scan. */ public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks"; - /** The number of rows for caching that will be passed to scanners. */ + /** The value to be used as the row limit of the scanner. */ + public static final String SCAN_ROWLIMIT = "hbase.mapreduce.scan.rowlimit"; + /** + * The number of rows for caching that will be passed to scanners. + * @deprecated since HBase 2.0.0, use {@link #SCAN_ROWLIMIT} instead + */ public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows"; /** Set the maximum number of values to return for each call to next(). */ public static final String SCAN_BATCHSIZE = "hbase.mapreduce.scan.batchsize"; @@ -156,8 +161,11 @@ implements Configurable { scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS))); } - if (conf.get(SCAN_CACHEDROWS) != null) { - scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS))); + // If SCAN_ROWLIMIT is not defined, check for the deprecated configuration. 
+ if (conf.get(SCAN_ROWLIMIT) != null) { + scan.setRowLimit(Integer.parseInt(conf.get(SCAN_ROWLIMIT))); + } else if (conf.get(SCAN_CACHEDROWS) != null) { + scan.setRowLimit(Integer.parseInt(conf.get(SCAN_CACHEDROWS))); } if (conf.get(SCAN_BATCHSIZE) != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 28f9f39f77904efe0fc0affbe47c6e08fca0e6ab..2f596309c3a21c4b85c937abd63c9cabaf08d2ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -42,13 +42,13 @@ import org.apache.hadoop.hbase.mapreduce.TableMapper; import org.apache.hadoop.hbase.mapreduce.TableSplit; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; -import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.util.Tool; @@ -110,7 +110,12 @@ public class VerifyReplication extends Configured implements Tool { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); final Scan scan = new Scan(); - scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1)); + + // If SCAN_ROWLIMIT is not configured, use the deprecated configuration. 
+ String rowLimit = conf.get(TableInputFormat.SCAN_ROWLIMIT); + if (rowLimit == null) rowLimit = conf.get(TableInputFormat.SCAN_CACHEDROWS, "1"); + scan.setRowLimit(Integer.parseInt(rowLimit)); + long startTime = conf.getLong(NAME + ".startTime", 0); long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE); String families = conf.get(NAME + ".families", null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 3e4d35b3a3bb9401b4f522df68ccdb74572cd4be..dc4b856ccbe73146dff0632f68d03738983096f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -179,7 +179,7 @@ public final class Canary implements Tool { } else { scan = new Scan(); scan.setRaw(true); - scan.setCaching(1); + scan.setRowLimit(1); scan.setCacheBlocks(false); scan.setFilter(new FirstKeyOnlyFilter()); scan.addFamily(column.getName()); @@ -260,7 +260,7 @@ public final class Canary implements Tool { scan = new Scan(); scan.setCacheBlocks(false); scan.setFilter(new FirstKeyOnlyFilter()); - scan.setCaching(1); + scan.setRowLimit(1); scan.setMaxResultSize(1L); stopWatch.start(); ResultScanner s = table.getScanner(scan); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 569ef719a5e6ad73874ca09ff2af6e6daa043002..01abb4bd4f889bc8abe6fe155983ba074d4c23ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -75,7 +75,11 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration; import org.apache.hadoop.hbase.trace.SpanReceiverHost; -import org.apache.hadoop.hbase.util.*; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Hash; +import org.apache.hadoop.hbase.util.MurmurHash; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.YammerHistogramUtils; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; @@ -85,11 +89,11 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.codehaus.jackson.map.ObjectMapper; import org.apache.htrace.Sampler; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; import org.apache.htrace.impl.ProbabilitySampler; +import org.codehaus.jackson.map.ObjectMapper; import com.google.common.base.Objects; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -612,7 +616,7 @@ public class PerformanceEvaluation extends Configured implements Tool { int period = (this.perClientRunRows / 10) == 0?
perClientRunRows: perClientRunRows / 10; int cycles = 1; int columns = 1; - int caching = 30; + int rowLimit = 30; boolean addColumns = true; public TestOptions() {} @@ -657,15 +661,15 @@ public class PerformanceEvaluation extends Configured implements Tool { this.measureAfter = that.measureAfter; this.addColumns = that.addColumns; this.columns = that.columns; - this.caching = that.caching; + this.rowLimit = that.rowLimit; } - public int getCaching() { - return this.caching; + public int getRowLimit() { + return this.rowLimit; } - public void setCaching(final int caching) { - this.caching = caching; + public void setRowLimit(final int rowLimit) { + this.rowLimit = rowLimit; } public int getColumns() { @@ -1211,7 +1215,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override void testRow(final int i) throws IOException { Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows)); - scan.setCaching(opts.caching); + scan.setRowLimit(opts.rowLimit); FilterList list = new FilterList(); if (opts.addColumns) { scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); @@ -1247,7 +1251,7 @@ public class PerformanceEvaluation extends Configured implements Tool { void testRow(final int i) throws IOException { Pair startAndStopRow = getStartAndStopRow(); Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond()); - scan.setCaching(opts.caching); + scan.setRowLimit(opts.rowLimit); if (opts.filterAll) { scan.setFilter(new FilterAllFilter()); } @@ -1442,7 +1446,7 @@ public class PerformanceEvaluation extends Configured implements Tool { void testRow(final int i) throws IOException { if (this.testScanner == null) { Scan scan = new Scan(format(opts.startRow)); - scan.setCaching(opts.caching); + scan.setRowLimit(opts.rowLimit); if (opts.addColumns) { scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); } else { @@ -1543,7 +1547,7 @@ public class PerformanceEvaluation extends Configured implements Tool { list.addFilter(new FilterAllFilter()); } Scan scan = new Scan(); - scan.setCaching(opts.caching); + scan.setRowLimit(opts.rowLimit); if (opts.addColumns) { scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); } else { @@ -1752,7 +1756,9 @@ public class PerformanceEvaluation extends Configured implements Tool { System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table."); System.err.println(" randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); System.err.println(" columns Columns to write per row. Default: 1"); - System.err.println(" caching Scan caching to use. Default: 30"); + System.err.println(" caching Scan caching to use. Deprecated since 2.0.0, " + + " use rowLimit instead. Default: 30"); + System.err.println(" rowLimit Scan row limit to use. Default: 30"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. 
"); System.err.println(" For example: "); @@ -1989,9 +1995,16 @@ public class PerformanceEvaluation extends Configured implements Tool { continue; } + final String rowLimit = "--rowLimit="; + if (cmd.startsWith(rowLimit)) { + opts.rowLimit = Integer.parseInt(cmd.substring(rowLimit.length())); + continue; + } + + // deprecated since 2.0.0, use --rowLimit instead final String caching = "--caching="; if (cmd.startsWith(caching)) { - opts.caching = Integer.parseInt(cmd.substring(caching.length())); + opts.rowLimit = Integer.parseInt(cmd.substring(caching.length())); continue; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java index 24e9590d9c7b7ebc1d78a2a8deb1834ce239a4a8..c20a0d0fcaf59f1bc1173f1225d7954025c5e556 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java @@ -63,7 +63,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { private String tablename; private String snapshotName; private String restoreDir; - private String caching; + private String rowLimit; @Override public void setConf(Configuration conf) { @@ -84,7 +84,9 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { this.addOptWithArg("tn", "table", "the tablename to read from"); this.addOptWithArg("sn", "snapshot", "the snapshot name to read from"); this.addOptWithArg("rs", "restoredir", "the directory to restore the snapshot"); - this.addOptWithArg("ch", "caching", "scanner caching value"); + this.addOptWithArg("ch", "caching", + "scanner caching value. Deprecated since HBase 2.0.0, used rowLimit instead"); + this.addOptWithArg("rl", "rowLimit", "scanner row limit value"); } @Override @@ -94,7 +96,11 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { tablename = cmd.getOptionValue("table"); snapshotName = cmd.getOptionValue("snapshot"); restoreDir = cmd.getOptionValue("restoredir"); - caching = cmd.getOptionValue("caching"); + + // Row limit takes precedence over caching option. 
Only use deprecated caching option if row + // limit was not provided + rowLimit = cmd.getOptionValue("rowLimit"); + if (rowLimit == null || rowLimit.isEmpty()) rowLimit = cmd.getOptionValue("caching"); } protected void testHdfsStreaming(Path filename) throws IOException { @@ -135,8 +141,8 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { scan.setCacheBlocks(false); scan.setMaxVersions(1); scan.setScanMetricsEnabled(true); - if (caching != null) { - scan.setCaching(Integer.parseInt(caching)); + if (rowLimit != null) { + scan.setRowLimit(Integer.parseInt(rowLimit)); } return scan; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java index 3794e5902385adc40a6662f1c39d26df177ebda9..a85722f908e3155e081021790ea10c77c9ee25b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -253,7 +253,7 @@ public class TestPartialResultsFromClientSide { // Scan that retrieves all table results in single RPC request Scan oneShotScan = new Scan(basePartialScan); oneShotScan.setMaxResultSize(Long.MAX_VALUE); - oneShotScan.setCaching(ROWS.length); + oneShotScan.setRowLimit(ROWS.length); ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan); Result oneShotResult = oneShotScanner.next(); @@ -539,17 +539,18 @@ public class TestPartialResultsFromClientSide { public void testPartialResultsAndCaching() throws Exception { for (int caching = 1; caching <= NUM_ROWS; caching++) { for (int maxResultRows = 0; maxResultRows <= NUM_ROWS; maxResultRows++) { - testPartialResultsAndCaching(maxResultRows, caching); + testPartialResultsAndRowLimit(maxResultRows, caching); } } } /** - * @param resultSizeRowLimit The row limit that will be enforced through maxResultSize - * @param cachingRowLimit The row limit that will be enforced through caching + * @param resultSizeRowLimit The row limit that will be enforced via + * {@link Scan#setMaxResultSize(long)} + * @param rowLimit The row limit that will be enforced via {@link Scan#setRowLimit(int)} * @throws Exception */ - public void testPartialResultsAndCaching(int resultSizeRowLimit, int cachingRowLimit) + public void testPartialResultsAndRowLimit(int resultSizeRowLimit, int rowLimit) throws Exception { Scan scan = new Scan(); scan.setAllowPartialResults(true); @@ -561,7 +562,7 @@ public class TestPartialResultsFromClientSide { int cellOffset = NUM_COLS / 3; long maxResultSize = getResultSizeForNumberOfCells(resultSizeRowLimit * NUM_COLS + cellOffset); scan.setMaxResultSize(maxResultSize); - scan.setCaching(cachingRowLimit); + scan.setRowLimit(rowLimit); ResultScanner scanner = TABLE.getScanner(scan); ClientScanner clientScanner = (ClientScanner) scanner; @@ -570,7 +571,7 @@ public class TestPartialResultsFromClientSide { // Approximate the number of rows we expect will fit into the specified max rsult size. 
If this // approximation is less than caching, then we expect that the max result size limit will be // hit before the caching limit and thus partial results may be seen - boolean expectToSeePartialResults = resultSizeRowLimit < cachingRowLimit; + boolean expectToSeePartialResults = resultSizeRowLimit < rowLimit; while ((r = clientScanner.next()) != null) { assertTrue(!r.isPartial() || expectToSeePartialResults); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java index 8af1d9f573b8a5374d3093ae995e8251667c67d0..8ec14fbf6ba3df29633f940660fa0013a792813d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java @@ -25,11 +25,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.ipc.AbstractRpcClient; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; @@ -95,7 +95,7 @@ public class TestClientScannerRPCTimeout { LOG.info("Wrote our three values"); RSRpcServicesWithScanTimeout.seqNoToSleepOn = 1; Scan scan = new Scan(); - scan.setCaching(1); + scan.setRowLimit(1); ResultScanner scanner = ht.getScanner(scan); Result result = scanner.next(); assertTrue("Expected row: row-1", Bytes.equals(r1, result.getRow())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 21df1e965f334f1d3334e2c496a2589590e2e34f..4ca00e061f95e8179270a7dacb26e4f753eaaf64 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -28,7 +28,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; -import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -43,12 +42,10 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReference; -import org.apache.log4j.Level; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -90,7 +87,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService; import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.regionserver.Region; @@ -100,8 +96,8 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; import org.junit.After; @@ -4981,7 +4977,7 @@ public class TestFromClientSide { // turn on scan metrics Scan scan2 = new Scan(); scan2.setScanMetricsEnabled(true); - scan2.setCaching(numRecords+1); + scan2.setRowLimit(numRecords + 1); scanner = ht.getScanner(scan2); for (Result result : scanner.next(numRecords - 1)) { } @@ -4989,10 +4985,10 @@ // closing the scanner will set the metrics. assertNotNull(scan2.getScanMetrics()); - // set caching to 1, because metrics are collected in each roundtrip only + // set row limit to 1, because metrics are collected in each roundtrip only scan2 = new Scan(); scan2.setScanMetricsEnabled(true); - scan2.setCaching(1); + scan2.setRowLimit(1); scanner = ht.getScanner(scan2); // per HBASE-5717, this should still collect even if you don't run all the way to // the end of the scanner. So this is asking for 2 of the 3 rows we inserted. @@ -5021,8 +5017,8 @@ // finally, test that the metrics are collected correctly if you both run past all the records, // AND close the scanner Scan scanWithClose = new Scan(); - // make sure we can set caching up to the number of a scanned values - scanWithClose.setCaching(numRecords); + // make sure we can set the row limit up to the number of scanned values + scanWithClose.setRowLimit(numRecords); scanWithClose.setScanMetricsEnabled(true); ResultScanner scannerWithClose = ht.getScanner(scanWithClose); for (Result result : scannerWithClose.next(numRecords + 1)) { @@ -5598,7 +5594,7 @@ // small scan Scan scan = new Scan(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); scan.setSmall(true); - scan.setCaching(2); + scan.setRowLimit(2); scanner = table.getScanner(scan); count = 0; for (Result r : scanner) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index bfc1230d87ff409f427fb731b5f5219601e909d0..c5370a35a6273b2dcc22788f2a4f6a16b7cd5c47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -621,7 +621,7 @@ public class TestReplicasClient { table.put(p); } LOG.debug("PUT done"); - int caching = 20; + int rowLimit = 20; byte[] start; start = Bytes.toBytes("testUseRegionWithReplica" + 0); @@ -635,7 +635,7 @@ SlowMeCopro.sleepTime.set(5000); Scan scan = new Scan(start); - scan.setCaching(caching); + scan.setRowLimit(rowLimit); scan.setConsistency(Consistency.TIMELINE); ResultScanner scanner = table.getScanner(scan); Iterator iter = scanner.iterator(); @@ -739,11 +739,11 @@ public class 
TestReplicasClient { } private void scanWithReplicas(boolean reversed, boolean small, Consistency consistency, - int caching, long maxResultSize, byte[] startRow, int numRows, int numCols, + int rowLimit, long maxResultSize, byte[] startRow, int numRows, int numCols, boolean staleExpected, boolean slowNext) throws Exception { Scan scan = new Scan(startRow); - scan.setCaching(caching); + scan.setRowLimit(rowLimit); scan.setMaxResultSize(maxResultSize); scan.setReversed(reversed); scan.setSmall(small); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java index f86f24835cef6df696a8ce0bbc774de72b40c320..ba4bdbc53121e9f3410c076536718d8562f4a6a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java @@ -55,7 +55,7 @@ public class TestScannerTimeout { // the mini cluster coming up -- the verification in particular. private final static int THREAD_WAKE_FREQUENCY = 1000; private final static int SCANNER_TIMEOUT = 15000; - private final static int SCANNER_CACHING = 5; + private final static int SCANNER_ROW_LIMIT = 5; /** * @throws java.lang.Exception @@ -100,7 +100,7 @@ public class TestScannerTimeout { public void test2481() throws Exception { LOG.info("START ************ test2481"); Scan scan = new Scan(); - scan.setCaching(1); + scan.setRowLimit(1); Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME); ResultScanner r = table.getScanner(scan); int count = 0; @@ -167,7 +167,7 @@ public class TestScannerTimeout { LOG.info("START ************ TEST3686A---1111"); Scan scan = new Scan(); - scan.setCaching(SCANNER_CACHING); + scan.setRowLimit(SCANNER_ROW_LIMIT); LOG.info("************ TEST3686A"); MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getHBaseAdmin().getConnection()); // Set a very high timeout, we want to test what happens when a RS @@ -211,7 +211,7 @@ public class TestScannerTimeout { LOG.info("START ************ test3686b"); HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME); Scan scan = new Scan(); - scan.setCaching(SCANNER_CACHING); + scan.setRowLimit(SCANNER_ROW_LIMIT); // Set a very high timeout, we want to test what happens when a RS // fails but the region is recovered before the lease times out. 
// Since the RS is already created, this conf is client-side only for diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index fc04bf0674ba6ef5a4c2bd16de2cd812c0ecc8b9..947ef727452cbf7961a4c0cfa7487874dba60d49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -278,7 +278,7 @@ public class TestScannersFromClientSide { scan = new Scan(baseScan); scan.setMaxResultSize(1); - scan.setCaching(Integer.MAX_VALUE); + scan.setRowLimit(Integer.MAX_VALUE); verifyExpectedCounts(table, scan, rows, columns); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java index 71b2d724e6708c0c91121a1c6e297bae43334089..aad161c1fc28f69e0114624106fd1ba8b9f56473 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java @@ -119,7 +119,7 @@ public class TestSizeFailures { s.addFamily(FAMILY); s.setMaxResultSize(-1); s.setBatch(-1); - s.setCaching(500); + s.setRowLimit(500); Entry entry = sumTable(table.getScanner(s)); long rowsObserved = entry.getKey(); long entriesObserved = entry.getValue(); @@ -142,7 +142,7 @@ public class TestSizeFailures { s.addFamily(FAMILY); s.setMaxResultSize(-1); s.setBatch(-1); - s.setCaching(500); + s.setRowLimit(500); Entry entry = sumTable(table.getScanner(s)); long rowsObserved = entry.getKey(); long entriesObserved = entry.getValue(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 5498d6697fc7a71d733bb5bd45e9472b2b6d2513..3780c79aed5054d1e3ae4e75b7ac4527871757f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -17,9 +17,35 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Result; +import 
org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -32,12 +58,6 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import static org.junit.Assert.*; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - @Category({RegionServerTests.class, MediumTests.class}) public class TestRegionServerMetrics { @@ -342,7 +362,7 @@ public class TestRegionServerMetrics { Scan s = new Scan(); s.setBatch(1); - s.setCaching(1); + s.setRowLimit(1); ResultScanner resultScanners = t.getScanner(s); for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { @@ -392,7 +412,7 @@ public class TestRegionServerMetrics { Scan s = new Scan(); s.setSmall(true); - s.setCaching(1); + s.setRowLimit(1); ResultScanner resultScanners = t.getScanner(s); for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java index df73ddd648ed468ab7a2fa5ea636b0f4113246ab..a40daf10f5531a84d246c0224072f6595f0be2d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java @@ -242,7 +242,7 @@ public class TestScannerHeartbeatMessages { // the case where a scan stops on the server side due to a time limit Scan scan = new Scan(); scan.setMaxResultSize(Long.MAX_VALUE); - scan.setCaching(Integer.MAX_VALUE); + scan.setRowLimit(Integer.MAX_VALUE); testEquivalenceOfScanWithHeartbeats(scan, DEFAULT_ROW_SLEEP_TIME, -1, false); return null; @@ -262,7 +262,7 @@ public class TestScannerHeartbeatMessages { // the case where a scan stops on the server side due to a time limit Scan baseScan = new Scan(); baseScan.setMaxResultSize(Long.MAX_VALUE); - baseScan.setCaching(Integer.MAX_VALUE); + baseScan.setRowLimit(Integer.MAX_VALUE); // Copy the scan before each test. 
When a scan object is used by a scanner, some of its // fields may be changed such as start row diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index 9c99a436236e9f0928f77cb0a76ae07128f64cf8..12a6996fcfb177be38f74e13b5b2cc02e8962bf0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -34,14 +34,11 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -56,6 +53,8 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.AfterClass; @@ -315,7 +314,7 @@ public class TestTags { TestCoprocessorForTags.checkTagPresence = true; Scan s = new Scan(row); - s.setCaching(1); + s.setRowLimit(1); ResultScanner scanner = table.getScanner(s); try { Result next = null; diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index daf320ce326d7f176a2dce0fe47bd1d08e93d52b..7723d21de8d74010d6a5117faa06679fe1398cc2 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -1425,7 +1425,7 @@ public class ThriftServerRunner implements Runnable { scan.setTimeRange(0, tScan.getTimestamp()); } if (tScan.isSetCaching()) { - scan.setCaching(tScan.getCaching()); + scan.setRowLimit(tScan.getCaching()); } if (tScan.isSetBatchSize()) { scan.setBatch(tScan.getBatchSize()); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java index d2da99349e3a03dbf19ff2f4ddeec316f621e275..16451a8bb0bcd1ea1cdda304c392484a97bb7631 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java @@ -380,7 +380,7 @@ public class ThriftUtilities { if (in.isSetStopRow()) out.setStopRow(in.getStopRow()); if (in.isSetCaching()) - out.setCaching(in.getCaching()); + out.setRowLimit(in.getCaching()); if (in.isSetMaxVersions()) { out.setMaxVersions(in.getMaxVersions()); } -- 1.9.3 (Apple Git-50)
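A minimal usage sketch of the renamed limit, assuming the Scan#setRowLimit(int) accessor this patch introduces in place of the deprecated Scan#setCaching(int); the table name "testRowLimitTable" and family "f" are placeholders and the class is not part of the patch itself:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowLimitScanExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Table name and column family are placeholders for this sketch.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("testRowLimitTable"))) {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("f"));
      // Previously this would have been scan.setCaching(100), which this patch deprecates.
      // The row limit, like caching, caps the rows fetched per scanner round trip.
      scan.setRowLimit(100);
      // The size limit bounds the same round trips by payload size.
      scan.setMaxResultSize(2 * 1024 * 1024);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}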
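A second sketch of the interaction exercised in TestPartialResultsFromClientSide above: with Scan#setAllowPartialResults(boolean) enabled, partial Results are only expected when the Scan#setMaxResultSize(long) limit trips before the row limit does. The countCells helper is hypothetical and assumes an already-opened Table:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class PartialResultsSketch {
  // Counts cells under both limits; partial Results should only be seen when the
  // size limit is reached before rowLimit rows have been accumulated.
  static long countCells(Table table, long maxResultSize, int rowLimit) throws IOException {
    Scan scan = new Scan();
    scan.setAllowPartialResults(true);
    scan.setMaxResultSize(maxResultSize);
    scan.setRowLimit(rowLimit);
    long cells = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        // A partial Result carries only some of a row's cells; callers that opt in
        // via setAllowPartialResults must stitch rows together themselves.
        cells += result.rawCells().length;
      }
    }
    return cells;
  }
}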