diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 5e28859..57b9e48 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -18,7 +18,17 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -28,17 +38,25 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; +import org.apache.hadoop.hbase.client.ClientScanner.ScannerCallableWithReplicas.ScannerCallablePerReplica; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; +import org.apache.hadoop.hbase.util.BoundedCompletionService; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; /** * Implements the scanner interface for the HBase client. @@ -54,18 +72,47 @@ public class ClientScanner extends AbstractClientScanner { // Current region scanner is against. Gets cleared if current region goes // wonky: e.g. if it splits on us. protected HRegionInfo currentRegion = null; - protected ScannerCallable callable = null; + protected ScannerCallableWithReplicas callable = null; protected final LinkedList cache = new LinkedList(); protected final int caching; protected long lastNext; // Keep lastResult returned successfully in case we have to reset scanner. protected Result lastResult = null; protected final long maxScannerResultSize; - private final HConnection connection; + private final ClusterConnection connection; private final TableName tableName; private final int scannerTimeout; protected boolean scanMetricsPublished = false; protected RpcRetryingCaller caller; + private Configuration conf; + private int retries; + protected final int primaryOperationTimeout; + protected final ExecutorService pool; + + /** + * Create a new ClientScanner for the specified table. An HConnection will be + * retrieved using the passed Configuration. + * Note that the passed {@link Scan}'s start row maybe changed changed. + * + * @param conf The {@link Configuration} to use. 
+ * @param scan {@link Scan} to use in this scanner + * @param tableName The table that we wish to scan + * @throws IOException + */ + public ClientScanner(final Configuration conf, final Scan scan, + final TableName tableName, ClusterConnection connection, + ExecutorService pool, int primaryOperationTimeout) + throws IOException { + this(conf, scan, tableName, connection, + new RpcRetryingCallerFactory(conf), pool, primaryOperationTimeout); + } + + public ClientScanner(final Configuration conf, final Scan scan, + final TableName tableName, ClusterConnection connection, + RpcRetryingCallerFactory rpcFactory) + throws IOException { + this(conf, scan, tableName, connection, rpcFactory, null, 0); + } /** * Create a new ClientScanner for the specified table. An HConnection will be @@ -79,7 +126,7 @@ public class ClientScanner extends AbstractClientScanner { */ public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName) throws IOException { - this(conf, scan, tableName, HConnectionManager.getConnection(conf)); + this(conf, scan, tableName, ConnectionManager.getConnectionInternal(conf));//HConnectionManager.getConnection(conf)); } /** @@ -103,8 +150,8 @@ public class ClientScanner extends AbstractClientScanner { * @throws IOException */ public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName, - HConnection connection) throws IOException { - this(conf, scan, tableName, connection, new RpcRetryingCallerFactory(conf)); + ClusterConnection connection) throws IOException { + this(conf, scan, tableName, connection, new RpcRetryingCallerFactory(conf), null, 0); } /** @@ -112,8 +159,9 @@ public class ClientScanner extends AbstractClientScanner { */ @Deprecated public ClientScanner(final Configuration conf, final Scan scan, final byte [] tableName, - HConnection connection) throws IOException { - this(conf, scan, TableName.valueOf(tableName), connection, new RpcRetryingCallerFactory(conf)); + ClusterConnection connection) throws IOException { + this(conf, scan, TableName.valueOf(tableName), connection, new RpcRetryingCallerFactory(conf), + null, 0); } /** @@ -126,7 +174,8 @@ public class ClientScanner extends AbstractClientScanner { * @throws IOException */ public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName, - HConnection connection, RpcRetryingCallerFactory rpcFactory) throws IOException { + ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, ExecutorService pool, + int primaryOperationTimeout) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Scan table=" + tableName + ", startRow=" + Bytes.toStringBinary(scan.getStartRow())); @@ -135,6 +184,10 @@ public class ClientScanner extends AbstractClientScanner { this.tableName = tableName; this.lastNext = System.currentTimeMillis(); this.connection = connection; + this.pool = pool; + this.primaryOperationTimeout = primaryOperationTimeout; + this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); if (scan.getMaxResultSize() > 0) { this.maxScannerResultSize = scan.getMaxResultSize(); } else { @@ -159,8 +212,8 @@ public class ClientScanner extends AbstractClientScanner { HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); } - this.caller = rpcFactory. newCaller(); - + this.caller = rpcFactory. 
newCaller(); + this.conf = conf; initializeScannerInConstruction(); } @@ -169,7 +222,7 @@ public class ClientScanner extends AbstractClientScanner { nextScanner(this.caching, false); } - protected HConnection getConnection() { + protected ClusterConnection getConnection() { return this.connection; } @@ -224,7 +277,7 @@ public class ClientScanner extends AbstractClientScanner { // Close the previous scanner if it's open if (this.callable != null) { this.callable.setClose(); - this.caller.callWithRetries(callable); + call(scan, callable, caller); this.callable = null; } @@ -261,7 +314,7 @@ public class ClientScanner extends AbstractClientScanner { callable = getScannerCallable(localStartKey, nbRows); // Open a scanner on the region server starting at the // beginning of the region - this.caller.callWithRetries(callable); + call(scan, callable, caller); this.currentRegion = callable.getHRegionInfo(); if (this.scanMetrics != null) { this.scanMetrics.countOfRegions.incrementAndGet(); @@ -273,14 +326,315 @@ public class ClientScanner extends AbstractClientScanner { return true; } + static Result[] call(Scan scan, ScannerCallableWithReplicas callable, + RpcRetryingCaller caller) + throws IOException, RuntimeException { + if (scan.getConsistency() == Consistency.STRONG) { + return caller.callWithRetries(callable); + } else { + return caller.callWithoutRetries(callable); + } + } + + + class ScannerCallableWithReplicas implements RetryingCallable { + volatile ScannerCallablePerReplica currentScannerCallable; + final ClusterConnection cConnection; + protected final ExecutorService pool; + protected final int timeBeforeReplicas; + protected final ScannerCallable baseCallable; + + public ScannerCallableWithReplicas (ClusterConnection cConnection, ScannerCallable baseCallable, + ExecutorService pool, int timeBeforeReplicas) { + this.baseCallable = baseCallable; + this.cConnection = cConnection; + this.pool = pool; + this.timeBeforeReplicas = timeBeforeReplicas; + } + + public void setClose() { + baseCallable.setClose(); + if (currentScannerCallable != null) { + currentScannerCallable.setClose(); + } + } + + public void setCaching(int caching) { + baseCallable.setCaching(caching); + } + + public int getCaching() { + return baseCallable.getCaching(); + } + + public HRegionInfo getHRegionInfo() { + return baseCallable.getHRegionInfo(); + } + + @Override + public Result [] call() throws IOException { + if (scan.getConsistency() == Consistency.STRONG) return baseCallable.call(); + if (currentScannerCallable != null && currentScannerCallable.closed) { + if (LOG.isDebugEnabled()) { + LOG.debug("Closing scanner directly " + currentScannerCallable.scannerId); + } + Result[] r = currentScannerCallable.call(); + currentScannerCallable = null; + return r; + } + /* + * We need to do the following: + * 1. When a scan goes out to a certain replica (default or not), we need to + * continue to hit that until there is a failure. So store the last successfully invoked + * replica + * 2. We should reset scannerIDs etc when we switch to a different replica + */ + + RegionLocations rl = getRegionLocations(true, RegionReplicaUtil.DEFAULT_REPLICA_ID); + BoundedCompletionService> cs = + new BoundedCompletionService>(pool, rl.size()); + + List exceptions = null; + int submitted = 0, completed = 0; + AtomicBoolean done = new AtomicBoolean(false); + // submit call for the primary replica. 
+ submitted += addCallsForCurrentReplica(cs, rl); + try { + // wait for the timeout to see whether the primary responds back + Future> f = cs.poll(timeBeforeReplicas, + TimeUnit.MICROSECONDS); // Yes, microseconds + if (f != null) { + Pair r = f.get(); + if (r != null && r.getSecond() != null) { + updateCurrentlyServingReplica(r.getSecond(), done); + } + return r == null ? null : r.getFirst(); //great we got a response + } + } catch (ExecutionException e) { + // the primary call failed with RetriesExhaustedException or DoNotRetryIOException + // but the secondaries might still succeed. Continue on the replica RPCs. + exceptions = new ArrayList(rl.size()); + exceptions.add(e); + completed++; + } catch (CancellationException e) { + throw new InterruptedIOException(); + } catch (InterruptedException e) { + throw new InterruptedIOException(); + } + // submit call for the all of the secondaries at once + // TODO: this may be an overkill for large region replication + submitted += addCallsForOtherReplicas(cs, rl, 0, rl.size() - 1); + try { + while (completed < submitted) { + try { + Future> f = cs.take(); + if (f != null) { + Pair r = f.get(); + if (r != null && r.getSecond() != null) { + updateCurrentlyServingReplica(r.getSecond(), done); + } + return r == null ? null : r.getFirst(); // great we got an answer + } + } catch (ExecutionException e) { + // if not cancel or interrupt, wait until all RPC's are done + // one of the tasks failed. Save the exception for later. + if (exceptions == null) exceptions = new ArrayList(rl.size()); + exceptions.add(e); + completed++; + } + } + } catch (CancellationException e) { + throw new InterruptedIOException(); + } catch (InterruptedException e) { + throw new InterruptedIOException(); + } finally { + // We get there because we were interrupted or because one or more of the + // calls succeeded or failed. In all case, we stop all our tasks. + cs.cancelAll(true); + } + + if (exceptions != null && !exceptions.isEmpty()) { + RpcRetryingCallerWithReadReplicas.throwEnrichedException(exceptions.get(0), + retries, toString()); // just rethrow the first exception for now. 
+ } + return null; // unreachable + } + + private void updateCurrentlyServingReplica(ScannerCallablePerReplica scanner, + AtomicBoolean done) { + if (done.compareAndSet(false, true)) { + currentScannerCallable = scanner; + if (LOG.isDebugEnabled()) { + LOG.debug("Setting current scanner as " + currentScannerCallable.scannerId + + " associated with " + currentScannerCallable.id); + } + } else { + // else someone else already completed the RPC + // close ourself since we are out of sync with + // client state + scanner.setClose(); + if (LOG.isDebugEnabled()) { + LOG.debug("Closing scanner " + scanner.id + + " because this was slow and another replica succeeded"); + } + try { + scanner.call(); + } catch (IOException e) { + // ignore + } + } + } + + private int addCallsForCurrentReplica(BoundedCompletionService> cs, + RegionLocations rl) { + if (currentScannerCallable == null) { + HRegionLocation hrl = rl.getRegionLocation(0); + currentScannerCallable = new ScannerCallablePerReplica(0, caching, hrl); + } + RetryingRPC retryingOnReplica = new RetryingRPC(currentScannerCallable); + cs.submit(retryingOnReplica); + return 1; + } + + private int addCallsForOtherReplicas(BoundedCompletionService> cs, + RegionLocations rl, int min, int max) { + for (int id = min; id <= max; id++) { + if (currentScannerCallable.id == id) continue; //this was already scheduled earlier + HRegionLocation hrl = rl.getRegionLocation(id); + ScannerCallablePerReplica s = new ScannerCallablePerReplica(id, caching, hrl); + RetryingRPC retryingOnReplica = new RetryingRPC(s); + cs.submit(retryingOnReplica); + } + return max - min + 1; + } + + private RegionLocations getRegionLocations(boolean useCache, int replicaId) + throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { + RegionLocations rl; + try { + rl = cConnection.locateRegion(tableName, baseCallable.getRow(), useCache, true, replicaId); + } catch (DoNotRetryIOException e) { + throw e; + } catch (RetriesExhaustedException e) { + throw e; + } catch (InterruptedIOException e) { + throw e; + } catch (IOException e) { + throw new RetriesExhaustedException("Can't get the location", e); + } + if (rl == null) { + throw new RetriesExhaustedException("Can't get the locations"); + } + + return rl; + } + + class ScannerCallablePerReplica extends ScannerCallable { + // need to inherit from ScannerCallable since we'd like to manage the + // location for replicas + final int id; + public ScannerCallablePerReplica(int id, int caching, HRegionLocation location) { + super(ScannerCallableWithReplicas.this.baseCallable.connection, + ScannerCallableWithReplicas.this.baseCallable.tableName, + ScannerCallableWithReplicas.this.baseCallable.getScan(), + ScannerCallableWithReplicas.this.baseCallable.scanMetrics); + this.setCaching(caching); + this.id = id; + this.location = location; + } + + @Override + public void prepare(boolean reload) throws IOException { + if (Thread.interrupted()) { + throw new InterruptedIOException(); + } + if (reload || location == null) { + RegionLocations rl = getRegionLocations(false, id); + location = id < rl.size() ? rl.getRegionLocation(id) : null; + } + + if (location == null || location.getServerName() == null) { + // With this exception, there will be a retry. The location can be null for a replica + // when the table is created or after a split. 
+ throw new HBaseIOException("There is no location for replica id #" + id); + } + ServerName dest = location.getServerName(); + assert dest != null; + if (!instantiated || reload) { + checkIfRegionServerIsRemote(); + instantiated = true; + } + + // check how often we retry. + // HConnectionManager will call instantiateServer with reload==true + // if and only if for retries. + if (reload && this.scanMetrics != null) { + this.scanMetrics.countOfRPCRetries.incrementAndGet(); + if (isRegionServerRemote) { + this.scanMetrics.countOfRemoteRPCRetries.incrementAndGet(); + } + } + // overwrite the location + setStub(connection.getClient(dest)); + } + + @Override + public Result[] call() throws IOException { + if (Thread.interrupted()) { + throw new InterruptedIOException(); //TODO: how does this affect the scanner.close + } + return super.call(); + } + } + + @Override + public void prepare(boolean reload) throws IOException { + baseCallable.prepare(reload); + } + + @Override + public void throwable(Throwable t, boolean retrying) { + baseCallable.throwable(t, retrying); + } + + @Override + public String getExceptionMessageAdditionalDetail() { + return baseCallable.getExceptionMessageAdditionalDetail(); + } + + @Override + public long sleep(long pause, int tries) { + return baseCallable.sleep(pause, tries); + } + } + + class RetryingRPC implements Callable> { + final RetryingCallable callable; + + RetryingRPC(RetryingCallable callable) { + this.callable = callable; + } + + @Override + public Pair call() throws IOException { + Result[] res = new RpcRetryingCallerFactory(ClientScanner.this.conf).newCaller(). + callWithRetries(callable, scannerTimeout); + //Result[] res = caller.callWithRetries(callable, scannerTimeout); + return new Pair(res, + (ScannerCallablePerReplica)callable); + } + } + @InterfaceAudience.Private - protected ScannerCallable getScannerCallable(byte [] localStartKey, + protected ScannerCallableWithReplicas getScannerCallable(byte [] localStartKey, int nbRows) { scan.setStartRow(localStartKey); ScannerCallable s = new ScannerCallable(getConnection(), getTable(), scan, this.scanMetrics); s.setCaching(nbRows); - return s; + ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(getConnection(), s, + pool, primaryOperationTimeout); + return sr; } /** @@ -326,17 +680,17 @@ public class ClientScanner extends AbstractClientScanner { // Skip only the first row (which was the last row of the last // already-processed batch). callable.setCaching(1); - values = this.caller.callWithRetries(callable); + values = call(scan, callable, caller); callable.setCaching(this.caching); skipFirst = false; } // Server returns a null values if scanning is to stop. Else, // returns an empty array if scanning is to go on and we've just // exhausted current region. - values = this.caller.callWithRetries(callable); + values = call(scan, callable, caller); if (skipFirst && values != null && values.length == 1) { skipFirst = false; // Already skipped, unset it before scanning again - values = this.caller.callWithRetries(callable); + values = call(scan, callable, caller); } retryAfterOutOfOrderException = true; } catch (DoNotRetryIOException e) { @@ -428,7 +782,7 @@ public class ClientScanner extends AbstractClientScanner { if (callable != null) { callable.setClose(); try { - this.caller.callWithRetries(callable); + call(scan, callable, caller); } catch (IOException e) { // We used to catch this error, interpret, and rethrow. 
However, we // have since decided that it's not nice for a scanner's close to diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java index a17be55..6445cbd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java @@ -70,7 +70,7 @@ public class ClientSmallScanner extends ClientScanner { */ public ClientSmallScanner(final Configuration conf, final Scan scan, final TableName tableName) throws IOException { - this(conf, scan, tableName, HConnectionManager.getConnection(conf)); + this(conf, scan, tableName, ConnectionManager.getConnectionInternal(conf)); } /** @@ -84,7 +84,7 @@ public class ClientSmallScanner extends ClientScanner { * @throws IOException */ public ClientSmallScanner(final Configuration conf, final Scan scan, - final TableName tableName, HConnection connection) throws IOException { + final TableName tableName, ClusterConnection connection) throws IOException { this(conf, scan, tableName, connection, new RpcRetryingCallerFactory(conf)); } @@ -100,7 +100,7 @@ public class ClientSmallScanner extends ClientScanner { * @throws IOException */ public ClientSmallScanner(final Configuration conf, final Scan scan, - final TableName tableName, HConnection connection, + final TableName tableName, ClusterConnection connection, RpcRetryingCallerFactory rpcFactory) throws IOException { super(conf, scan, tableName, connection, rpcFactory); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index b81b5ba..58c67fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -136,6 +136,7 @@ public class HTable implements HTableInterface { private final boolean cleanupConnectionOnClose; // close the connection in close() private Consistency defaultConsistency = Consistency.STRONG; private int primaryCallTimeoutMicroSecond; + private int primaryCallTimeoutMicroSecondScan; /** The Async process for puts with autoflush set to false or multiputs */ @@ -360,6 +361,8 @@ public class HTable implements HTableInterface { HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); this.primaryCallTimeoutMicroSecond = this.configuration.getInt("hbase.client.primaryCallTimeout.get", 10000); // 10 ms + this.primaryCallTimeoutMicroSecondScan = + this.configuration.getInt("hbase.client.primaryCallTimeout.scan", 1000000); // 1000 ms this.retries = configuration.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); @@ -767,7 +770,7 @@ public class HTable implements HTableInterface { this.connection); } return new ClientScanner(getConfiguration(), scan, - getName(), this.connection); + getName(), this.connection, pool, primaryCallTimeoutMicroSecondScan); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java index 618b3b3..8376c55 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClientScanner.ScannerCallableWithReplicas; import org.apache.hadoop.hbase.util.Bytes; /** @@ -50,7 +51,7 @@ public class ReversedClientScanner extends ClientScanner { * @throws IOException */ public ReversedClientScanner(Configuration conf, Scan scan, - TableName tableName, HConnection connection) throws IOException { + TableName tableName, ClusterConnection connection) throws IOException { super(conf, scan, tableName, connection); } @@ -120,13 +121,15 @@ public class ReversedClientScanner extends ClientScanner { return true; } - protected ScannerCallable getScannerCallable(byte[] localStartKey, + protected ScannerCallableWithReplicas getScannerCallable(byte[] localStartKey, int nbRows, byte[] locateStartRow) { scan.setStartRow(localStartKey); ScannerCallable s = new ReversedScannerCallable(getConnection(), getTable(), scan, this.scanMetrics, locateStartRow); s.setCaching(nbRows); - return s; + ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(getConnection(), s, + pool, primaryOperationTimeout); + return sr; } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index c26629d..cba6ff1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -225,7 +225,7 @@ public class RpcRetryingCallerWithReadReplicas { } if (exceptions != null && !exceptions.isEmpty()) { - throwEnrichedException(exceptions.get(0)); // just rethrow the first exception for now. + throwEnrichedException(exceptions.get(0), retries, toString()); // just rethrow the first exception for now. } return null; // unreachable } @@ -234,7 +234,7 @@ public class RpcRetryingCallerWithReadReplicas { * Extract the real exception from the ExecutionException, and throws what makes more * sense. 
*/ - private void throwEnrichedException(ExecutionException e) + static void throwEnrichedException(ExecutionException e, int retries, String str) throws RetriesExhaustedException, DoNotRetryIOException { Throwable t = e.getCause(); assert t != null; // That's what ExecutionException is about: holding an exception @@ -249,7 +249,7 @@ public class RpcRetryingCallerWithReadReplicas { RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(t, - EnvironmentEdgeManager.currentTimeMillis(), toString()); + EnvironmentEdgeManager.currentTimeMillis(), str); List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions = Collections.singletonList(qt); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index f7d1c51..a5c6622 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -115,7 +115,7 @@ public class Scan extends Query { private Map<byte [], NavigableSet<byte []>> familyMap = new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR); private Boolean loadColumnFamiliesOnDemand = null; - private Consistency consistency = null; + private Consistency consistency = Consistency.STRONG; /** * Set it true for small scan to get better performance diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 1a3d7a7..048a31a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -61,9 +61,9 @@ public class ScannerCallable extends RegionServerCallable<Result[]> { public static final String LOG_SCANNER_ACTIVITY = "hbase.client.log.scanner.activity"; public static final Log LOG = LogFactory.getLog(ScannerCallable.class); - private long scannerId = -1L; + protected long scannerId = -1L; //TODO: make it private; made it protected for testing protected boolean instantiated = false; - private boolean closed = false; + protected boolean closed = false; private Scan scan; private int caching = 1; protected ScanMetrics scanMetrics; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 76c9815..7010faa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -898,6 +898,7 @@ public final class ProtobufUtil { if (scan.isReversed()) { scanBuilder.setReversed(scan.isReversed()); } + scanBuilder.setConsistency(toConsistency(scan.getConsistency())); return scanBuilder.build(); } @@ -977,6 +978,9 @@ public final class ProtobufUtil { if (proto.hasReversed()) { scan.setReversed(proto.getReversed()); } + if (proto.hasConsistency()) { + scan.setConsistency(toConsistency(proto.getConsistency())); + } return scan; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index e9d5070..3d3505d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -368,7 +368,7 @@ public final class ResponseConverter { } cells.add(cellScanner.current()); } -
results[i] = Result.create(cells); + results[i] = Result.create(cells, null, response.getStale()); } else { // Result is pure pb. results[i] = ProtobufUtil.toResult(response.getResults(i)); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 741bf84..12bcfde 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -17404,6 +17404,16 @@ public final class ClientProtos { */ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder( int index); + + // optional bool stale = 6; + /** + * optional bool stale = 6; + */ + boolean hasStale(); + /** + * optional bool stale = 6; + */ + boolean getStale(); } /** * Protobuf type {@code ScanResponse} @@ -17506,6 +17516,11 @@ public final class ClientProtos { results_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.PARSER, extensionRegistry)); break; } + case 48: { + bitField0_ |= 0x00000008; + stale_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17719,12 +17734,29 @@ public final class ClientProtos { return results_.get(index); } + // optional bool stale = 6; + public static final int STALE_FIELD_NUMBER = 6; + private boolean stale_; + /** + * optional bool stale = 6; + */ + public boolean hasStale() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool stale = 6; + */ + public boolean getStale() { + return stale_; + } + private void initFields() { cellsPerResult_ = java.util.Collections.emptyList(); scannerId_ = 0L; moreResults_ = false; ttl_ = 0; results_ = java.util.Collections.emptyList(); + stale_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17753,6 +17785,9 @@ public final class ClientProtos { for (int i = 0; i < results_.size(); i++) { output.writeMessage(5, results_.get(i)); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(6, stale_); + } getUnknownFields().writeTo(output); } @@ -17787,6 +17822,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(5, results_.get(i)); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, stale_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -17829,6 +17868,11 @@ public final class ClientProtos { } result = result && getResultsList() .equals(other.getResultsList()); + result = result && (hasStale() == other.hasStale()); + if (hasStale()) { + result = result && (getStale() + == other.getStale()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -17862,6 +17906,10 @@ public final class ClientProtos { hash = (37 * hash) + RESULTS_FIELD_NUMBER; hash = (53 * hash) + getResultsList().hashCode(); } + if (hasStale()) { + hash = (37 * hash) + STALE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getStale()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -17992,6 +18040,8 @@ public final class ClientProtos { } else { resultsBuilder_.clear(); } + stale_ = false; + bitField0_ = (bitField0_ & ~0x00000020); return this; } @@ -18046,6 +18096,10 
@@ public final class ClientProtos { } else { result.results_ = resultsBuilder_.build(); } + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000008; + } + result.stale_ = stale_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -18107,6 +18161,9 @@ public final class ClientProtos { } } } + if (other.hasStale()) { + setStale(other.getStale()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18717,6 +18774,39 @@ public final class ClientProtos { return resultsBuilder_; } + // optional bool stale = 6; + private boolean stale_ ; + /** + * optional bool stale = 6; + */ + public boolean hasStale() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool stale = 6; + */ + public boolean getStale() { + return stale_; + } + /** + * optional bool stale = 6; + */ + public Builder setStale(boolean value) { + bitField0_ |= 0x00000020; + stale_ = value; + onChanged(); + return this; + } + /** + * optional bool stale = 6; + */ + public Builder clearStale() { + bitField0_ = (bitField0_ & ~0x00000020); + stale_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:ScanResponse) } @@ -29520,46 +29610,46 @@ public final class ClientProtos { "Specifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\022\n\nscanne", "r_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rcl" + "ose_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004" + - "\"y\n\014ScanResponse\022\030\n\020cells_per_result\030\001 \003" + - "(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003" + - " \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007results\030\005 \003(\0132\007.Res" + - "ult\"\263\001\n\024BulkLoadHFileRequest\022 \n\006region\030\001" + - " \002(\0132\020.RegionSpecifier\0225\n\013family_path\030\002 " + - "\003(\0132 .BulkLoadHFileRequest.FamilyPath\022\026\n" + - "\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006f" + - "amily\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFi", - "leResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Coprocesso" + - "rServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_nam" + - "e\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030" + - "\004 \002(\014\"d\n\031CoprocessorServiceRequest\022 \n\006re" + - "gion\030\001 \002(\0132\020.RegionSpecifier\022%\n\004call\030\002 \002" + - "(\0132\027.CoprocessorServiceCall\"]\n\032Coprocess" + - "orServiceResponse\022 \n\006region\030\001 \002(\0132\020.Regi" + - "onSpecifier\022\035\n\005value\030\002 \002(\0132\016.NameBytesPa" + - "ir\"L\n\006Action\022\r\n\005index\030\001 \001(\r\022 \n\010mutation\030" + - "\002 \001(\0132\016.MutationProto\022\021\n\003get\030\003 \001(\0132\004.Get", - "\"Y\n\014RegionAction\022 \n\006region\030\001 \002(\0132\020.Regio" + - "nSpecifier\022\016\n\006atomic\030\002 \001(\010\022\027\n\006action\030\003 \003" + - "(\0132\007.Action\"^\n\021ResultOrException\022\r\n\005inde" + - "x\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Result\022!\n\texce" + - "ption\030\003 \001(\0132\016.NameBytesPair\"f\n\022RegionAct" + - "ionResult\022-\n\021resultOrException\030\001 \003(\0132\022.R" + - "esultOrException\022!\n\texception\030\002 \001(\0132\016.Na" + - "meBytesPair\"G\n\014MultiRequest\022#\n\014regionAct" + - "ion\030\001 \003(\0132\r.RegionAction\022\022\n\nnonceGroup\030\002" + - " 
\001(\004\"@\n\rMultiResponse\022/\n\022regionActionRes", - "ult\030\001 \003(\0132\023.RegionActionResult*\'\n\013Consis" + - "tency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\261\002\n\rClie" + - "ntService\022 \n\003Get\022\013.GetRequest\032\014.GetRespo" + - "nse\022)\n\006Mutate\022\016.MutateRequest\032\017.MutateRe" + - "sponse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanRespo" + - "nse\022>\n\rBulkLoadHFile\022\025.BulkLoadHFileRequ" + - "est\032\026.BulkLoadHFileResponse\022F\n\013ExecServi" + - "ce\022\032.CoprocessorServiceRequest\032\033.Coproce" + - "ssorServiceResponse\022&\n\005Multi\022\r.MultiRequ" + - "est\032\016.MultiResponseBB\n*org.apache.hadoop", - ".hbase.protobuf.generatedB\014ClientProtosH" + - "\001\210\001\001\240\001\001" + "\"\210\001\n\014ScanResponse\022\030\n\020cells_per_result\030\001 " + + "\003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030" + + "\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007results\030\005 \003(\0132\007.Re" + + "sult\022\r\n\005stale\030\006 \001(\010\"\263\001\n\024BulkLoadHFileReq" + + "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\0225" + + "\n\013family_path\030\002 \003(\0132 .BulkLoadHFileReque" + + "st.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n" + + "\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(", + "\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002" + + "(\010\"a\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(" + + "\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013method_name\030\003" + + " \002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031CoprocessorServ" + + "iceRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" + + "fier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceC" + + "all\"]\n\032CoprocessorServiceResponse\022 \n\006reg" + + "ion\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002" + + "(\0132\016.NameBytesPair\"L\n\006Action\022\r\n\005index\030\001 " + + "\001(\r\022 \n\010mutation\030\002 \001(\0132\016.MutationProto\022\021\n", + "\003get\030\003 \001(\0132\004.Get\"Y\n\014RegionAction\022 \n\006regi" + + "on\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006atomic\030\002 \001" + + "(\010\022\027\n\006action\030\003 \003(\0132\007.Action\"^\n\021ResultOrE" + + "xception\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132" + + "\007.Result\022!\n\texception\030\003 \001(\0132\016.NameBytesP" + + "air\"f\n\022RegionActionResult\022-\n\021resultOrExc" + + "eption\030\001 \003(\0132\022.ResultOrException\022!\n\texce" + + "ption\030\002 \001(\0132\016.NameBytesPair\"G\n\014MultiRequ" + + "est\022#\n\014regionAction\030\001 \003(\0132\r.RegionAction" + + "\022\022\n\nnonceGroup\030\002 \001(\004\"@\n\rMultiResponse\022/\n", + "\022regionActionResult\030\001 \003(\0132\023.RegionAction" + + "Result*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIM" + + "ELINE\020\0012\261\002\n\rClientService\022 \n\003Get\022\013.GetRe" + + "quest\032\014.GetResponse\022)\n\006Mutate\022\016.MutateRe" + + "quest\032\017.MutateResponse\022#\n\004Scan\022\014.ScanReq" + + "uest\032\r.ScanResponse\022>\n\rBulkLoadHFile\022\025.B" + + "ulkLoadHFileRequest\032\026.BulkLoadHFileRespo" + + "nse\022F\n\013ExecService\022\032.CoprocessorServiceR" + + "equest\032\033.CoprocessorServiceResponse\022&\n\005M" + + "ulti\022\r.MultiRequest\032\016.MultiResponseBB\n*o", + 
"rg.apache.hadoop.hbase.protobuf.generate" + + "dB\014ClientProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -29661,7 +29751,7 @@ public final class ClientProtos { internal_static_ScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ScanResponse_descriptor, - new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", }); + new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", }); internal_static_BulkLoadHFileRequest_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_BulkLoadHFileRequest_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 2529c0c..24ab2f7 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -289,6 +289,7 @@ message ScanResponse { // This field is mutually exclusive with cells_per_result (since the Cells will // be inside the pb'd Result) repeated Result results = 5; + optional bool stale = 6; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index ebf1325..4f1072e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; @@ -3152,6 +3153,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa try { int i = 0; synchronized(scanner) { + boolean stale = (region.getRegionInfo().getReplicaId() != 0); for (; i < rows && currentScanResultSize < maxResultSize; ) { // Collect values to be returned here @@ -3162,7 +3164,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa currentScanResultSize += KeyValueUtil.ensureKeyValue(kv).heapSize(); } } - results.add(Result.create(values)); + results.add(Result.create(values, null, stale)); i++; } if (!moreRows) { @@ -3189,7 +3191,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa moreResults = false; results = null; } else { - addResults(builder, results, controller); + addResults(builder, results, controller, region.getRegionInfo().getReplicaId() == 0); } } finally { // We're done. On way out re-add the above removed lease. 
@@ -3235,8 +3237,9 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa } private void addResults(final ScanResponse.Builder builder, final List results, - final RpcController controller) { + final RpcController controller, boolean isDefaultRegion) { if (results == null || results.isEmpty()) return; + builder.setStale(!isDefaultRegion); if (isClientCellBlockSupport()) { for (Result res : results) { builder.addCellsPerResult(res.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index f8d7cf5..6fc2c92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -38,7 +38,9 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.zookeeper.KeeperException; import org.junit.After; @@ -50,6 +52,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -90,7 +93,17 @@ public class TestReplicasClient { @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { + slowdownCode(e); + } + + @Override + public RegionScanner preScannerOpen(final ObserverContext e, + final Scan scan, final RegionScanner s) throws IOException { + slowdownCode(e); + return null; + } + private void slowdownCode(final ObserverContext e) { if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) { CountDownLatch latch = cdl.get(); try { @@ -118,7 +131,7 @@ public class TestReplicasClient { // enable store file refreshing HTU.getConfiguration().setInt( StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD); - + HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true); HTU.startMiniCluster(NB_SERVERS); // Create table then get the single region for our new table. 
@@ -464,4 +477,81 @@ public class TestReplicasClient { closeRegion(hriSecondary); } } + + @Test + public void testScanWithReplicas() throws Exception { + openRegion(hriSecondary); + int NUMROWS = 100; + + try { + for (int i = 0; i < NUMROWS; i++) { + byte[] b1 = Bytes.toBytes("testUseRegionWithReplica" + i); + Put p = new Put(b1); + p.add(f, b1, b1); + table.put(p); + } + LOG.info("Put done"); + + byte[] start = Bytes.toBytes("testUseRegionWithReplica" + 0); + Scan scan = new Scan(start); + scan.setConsistency(Consistency.TIMELINE); + ResultScanner scanner = table.getScanner(scan); + Iterator iter = scanner.iterator(); + Assert.assertTrue (iter.hasNext() == true); + int count = 0; + while (iter.hasNext()) { + count++; + Assert.assertTrue(iter.next().isStale() == false); + } + Assert.assertTrue(count == NUMROWS); + + //Even if we were to slow the server down, unless we ask for stale + //we won't get it + SlowMeCopro.sleepTime.set(5000); + scan = new Scan(start); + scan.setConsistency(Consistency.STRONG); + scanner = table.getScanner(scan); + iter = scanner.iterator(); + Assert.assertTrue (iter.hasNext() == true); + count = 0; + while (iter.hasNext()) { + count++; + Assert.assertTrue(iter.next().isStale() == false); + } + Assert.assertTrue(count == NUMROWS); + + HTU.getHBaseAdmin().flush(table.getTableName()); + LOG.info("flush done"); + Thread.sleep(1000 + REFRESH_PERIOD * 2); + + // now we ask for potentially stale + SlowMeCopro.sleepTime.set(5000); + scan = new Scan(start); + scan.setConsistency(Consistency.TIMELINE); + scanner = table.getScanner(scan); + iter = scanner.iterator(); + Assert.assertTrue (iter.hasNext() == true); + count = 0; + while (iter.hasNext()) { + count++; + Assert.assertTrue(iter.next().isStale() == true); + } + Assert.assertTrue(count == NUMROWS); + SlowMeCopro.sleepTime.set(0); + } finally { + SlowMeCopro.cdl.get().countDown(); + SlowMeCopro.sleepTime.set(0); + for (int i = 0; i < 1000; i++) { + byte[] b1 = Bytes.toBytes("testUseRegionWithReplica" + i); + Delete d = new Delete(b1); + table.delete(d); + } + closeRegion(hriSecondary); + } + } + + @Test + public void testAllScannersClosed() throws Exception { + + } }
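Reviewer note on the ScannerCallableWithReplicas control flow introduced in ClientScanner: the scan is first submitted to the primary replica, which gets a head start of primaryOperationTimeout (microseconds); only if it does not respond in time is the scan fanned out to the remaining replicas, and whichever callable answers first becomes the currently serving scanner while the rest are cancelled. The standalone sketch below illustrates that pattern using plain java.util.concurrent primitives (ExecutorCompletionService in place of HBase's BoundedCompletionService); the class name, latencies, and String task type are illustrative assumptions, not the patch's actual classes.

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.CompletionService;
  import java.util.concurrent.ExecutorCompletionService;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;

  /** Simplified illustration of "primary first, then all replicas, keep the winner". */
  public class PrimaryThenReplicasSketch {

    /** Stand-in for one per-replica scanner RPC; sleeps to simulate latency. */
    static Callable<String> replicaCall(final int replicaId, final long latencyMs) {
      return new Callable<String>() {
        @Override
        public String call() throws Exception {
          Thread.sleep(latencyMs);
          return "result from replica " + replicaId;
        }
      };
    }

    public static void main(String[] args) throws Exception {
      final int replicaCount = 3;
      final long primaryHeadStartMs = 10;        // analogous to primaryOperationTimeout
      ExecutorService pool = Executors.newFixedThreadPool(replicaCount);
      CompletionService<String> cs = new ExecutorCompletionService<String>(pool);
      List<Future<String>> submitted = new ArrayList<Future<String>>();
      try {
        // 1. Submit the primary (replica 0) and give it a bounded head start.
        submitted.add(cs.submit(replicaCall(0, 50)));     // pretend the primary is slow
        Future<String> first = cs.poll(primaryHeadStartMs, TimeUnit.MILLISECONDS);
        if (first == null) {
          // 2. Primary did not answer in time: fan out to the secondaries as well.
          for (int id = 1; id < replicaCount; id++) {
            submitted.add(cs.submit(replicaCall(id, 5)));
          }
          // 3. Take whichever replica (primary included) finishes first.
          first = cs.take();
        }
        System.out.println(first.get());                  // the winner keeps serving the scan
      } finally {
        // 4. Cancel the losers, mirroring cs.cancelAll(true) in the patch.
        for (Future<String> f : submitted) {
          f.cancel(true);
        }
        pool.shutdownNow();
      }
    }
  }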