From 8521181f6ceca70f3c7137f9d380209249d19fb2 Mon Sep 17 00:00:00 2001 From: chenheng Date: Mon, 11 Apr 2016 17:52:40 +0800 Subject: [PATCH] HBASE-15629 Backport HBASE-14703 to 0.98+ --- .../apache/hadoop/hbase/client/AsyncProcess.java | 122 +- .../org/apache/hadoop/hbase/client/HTable.java | 137 +- .../hadoop/hbase/client/HTableMultiplexer.java | 2 +- .../hadoop/hbase/client/MetricsConnection.java | 10 +- .../apache/hadoop/hbase/client/MultiResponse.java | 71 +- .../hadoop/hbase/client/MultiServerCallable.java | 7 +- .../client/PayloadCarryingServerCallable.java | 48 + .../hadoop/hbase/client/ResultStatsUtil.java | 27 +- .../hadoop/hbase/client/RetryingTimeTracker.java | 57 + .../hbase/client/RpcRetryingCallerFactory.java | 9 +- .../hbase/client/ServerStatisticTracker.java | 5 +- .../hadoop/hbase/client/StatisticTrackable.java | 33 + .../client/StatsTrackingRpcRetryingCaller.java | 63 - .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 11 +- .../hadoop/hbase/protobuf/ResponseConverter.java | 29 +- .../hadoop/hbase/client/TestAsyncProcess.java | 2 - .../hbase/protobuf/generated/ClientProtos.java | 1439 +++++++++++++++++++- hbase-protocol/src/main/protobuf/Client.proto | 8 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 4 +- .../hadoop/hbase/regionserver/HRegionServer.java | 63 +- .../hadoop/hbase/client/TestCheckAndMutate.java | 9 +- .../hadoop/hbase/client/TestClientPushback.java | 29 + .../hadoop/hbase/client/TestFromClientSide.java | 8 +- 23 files changed, 1874 insertions(+), 319 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/PayloadCarryingServerCallable.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index ca590fc..11179f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.backoff.ServerStatistics; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; @@ -346,7 +347,7 @@ class AsyncProcess { } while (retainedActions.isEmpty() && atLeastOne && !hasError()); HConnectionManager.ServerErrorTracker errorsByServer = createServerErrorTracker(); - sendMultiAction(retainedActions, actionsByServer, 1, errorsByServer, batchCallback); + sendMultiAction(retainedActions, actionsByServer, 1, errorsByServer, batchCallback, null); } /** @@ -478,7 +479,8 @@ class AsyncProcess { * * @param rows the list of rows. */ - public void submitAll(List rows) { + public void submitAll(List rows, Batch.Callback batchCallback, + PayloadCarryingServerCallable callable) { List> actions = new ArrayList>(rows.size()); // The position will be used by the processBatch to match the object array returned. 
@@ -497,7 +499,11 @@ class AsyncProcess { actions.add(action); } HConnectionManager.ServerErrorTracker errorsByServer = createServerErrorTracker(); - submit(actions, actions, 1, errorsByServer); + submit(actions, actions, 1, errorsByServer, batchCallback, callable); + } + + public void submitAll(List rows) { + submitAll(rows, null, null); } private void setNonce(NonceGenerator ng, Row r, Action action) { @@ -516,7 +522,9 @@ class AsyncProcess { */ private void submit(List> initialActions, List> currentActions, int numAttempt, - final HConnectionManager.ServerErrorTracker errorsByServer) { + final HConnectionManager.ServerErrorTracker errorsByServer, + Batch.Callback batchCallback, + PayloadCarryingServerCallable callable) { if (numAttempt > 1){ retriesCnt.incrementAndGet(); @@ -535,7 +543,8 @@ class AsyncProcess { } if (!actionsByServer.isEmpty()) { - sendMultiAction(initialActions, actionsByServer, numAttempt, errorsByServer, null); + sendMultiAction(initialActions, actionsByServer, numAttempt, + errorsByServer, batchCallback, callable); } } @@ -551,14 +560,15 @@ class AsyncProcess { Map> actionsByServer, final int numAttempt, final HConnectionManager.ServerErrorTracker errorsByServer, - Batch.Callback batchCallback) { + Batch.Callback batchCallback, + PayloadCarryingServerCallable callable) { // Send the queries and add them to the inProgress list // This iteration is by server (the HRegionLocation comparator is by server portion only). 
for (Map.Entry> e : actionsByServer.entrySet()) { HRegionLocation loc = e.getKey(); MultiAction multiAction = e.getValue(); Collection runnables = getNewMultiActionRunnable(initialActions, loc, - multiAction, numAttempt, errorsByServer, batchCallback); + multiAction, numAttempt, errorsByServer, batchCallback, callable); for (Runnable runnable: runnables) { try { incTaskCounters(multiAction.getRegions(), loc.getServerName()); @@ -576,45 +586,50 @@ class AsyncProcess { decTaskCounters(multiAction.getRegions(), loc.getServerName()); // We're likely to fail again, but this will increment the attempt counter, so it will // finish. - receiveGlobalFailure(initialActions, multiAction, loc, numAttempt, t, errorsByServer); + receiveGlobalFailure(initialActions, multiAction, loc, numAttempt, t, + errorsByServer, callable); } } } } private Runnable getNewSingleServerRunnable( - final List> initialActions, - final HRegionLocation loc, - final MultiAction multiAction, - final int numAttempt, - final HConnectionManager.ServerErrorTracker errorsByServer, - final Batch.Callback batchCallback) { + final List> initialActions, + final HRegionLocation loc, + final MultiAction multiAction, + final int numAttempt, + final HConnectionManager.ServerErrorTracker errorsByServer, + final Batch.Callback batchCallback, + final PayloadCarryingServerCallable pcsCallable) { return new Runnable() { @Override public void run() { MultiResponse res; try { - MultiServerCallable callable = createCallable(loc, multiAction); + PayloadCarryingServerCallable callable = pcsCallable; + if (pcsCallable == null) { + callable = createCallable(loc, multiAction); + } try { res = createCaller(callable).callWithoutRetries(callable, timeout); } catch (IOException e) { // The service itself failed . It may be an error coming from the communication // layer, but, as well, a functional error raised by the server. 
receiveGlobalFailure(initialActions, multiAction, loc, numAttempt, e, - errorsByServer); + errorsByServer, callable); return; } catch (Throwable t) { // This should not happen. Let's log & retry anyway. LOG.error("#" + id + ", Caught throwable while calling. This is unexpected." + " Retrying. Server is " + loc.getServerName() + ", tableName=" + tableName, t); receiveGlobalFailure(initialActions, multiAction, loc, numAttempt, t, - errorsByServer); + errorsByServer, callable); return; } // Nominal case: we received an answer from the server, and it's not an exception. receiveMultiAction(initialActions, multiAction, loc, res, numAttempt, errorsByServer, - batchCallback); + batchCallback, callable); } finally { decTaskCounters(multiAction.getRegions(), loc.getServerName()); @@ -629,7 +644,7 @@ class AsyncProcess { final MultiAction multiAction, final int numAttempt, final HConnectionManager.ServerErrorTracker errorsByServer, - final Batch.Callback batchCallback) { + final Batch.Callback batchCallback, PayloadCarryingServerCallable callable) { // no stats to manage, just do the standard action if (AsyncProcess.this.hConnection.getStatisticsTracker() == null) { if (hConnection.getConnectionMetrics() != null) { @@ -638,7 +653,7 @@ class AsyncProcess { List toReturn = new ArrayList(1); toReturn.add(Trace.wrap("AsyncProcess.sendMultiAction", getNewSingleServerRunnable(initialActions, loc, multiAction, numAttempt, - errorsByServer, batchCallback))); + errorsByServer, batchCallback, callable))); return toReturn; } else { // group the actions by the amount of delay @@ -660,7 +675,7 @@ class AsyncProcess { for (DelayingRunner runner : actions.values()) { String traceText = "AsyncProcess.sendMultiAction"; Runnable runnable = getNewSingleServerRunnable(initialActions, loc, runner.getActions(), - numAttempt, errorsByServer, batchCallback); + numAttempt, errorsByServer, batchCallback, callable); // use a delay runner only if we need to sleep for some time if (runner.getSleepTime() 
> 0) { runner.setRunner(runnable); @@ -683,8 +698,6 @@ class AsyncProcess { } /** - * @param server server location where the target region is hosted - * @param regionName name of the region which we are going to write some data * @return the amount of time the client should wait until it submit a request to the * specified server and region */ @@ -709,7 +722,7 @@ class AsyncProcess { * @param callable: used in tests. * @return Returns a caller. */ - protected RpcRetryingCaller createCaller(MultiServerCallable callable) { + protected RpcRetryingCaller createCaller(PayloadCarryingServerCallable callable) { return rpcCallerFactory. newCaller(); } @@ -760,7 +773,8 @@ class AsyncProcess { */ private void receiveGlobalFailure(List> initialActions, MultiAction rsActions, HRegionLocation location, int numAttempt, Throwable t, - HConnectionManager.ServerErrorTracker errorsByServer) { + HConnectionManager.ServerErrorTracker errorsByServer, + PayloadCarryingServerCallable callable) { // Do not use the exception for updating cache because it might be coming from // any of the regions in the MultiAction. 
hConnection.updateCachedLocations(tableName, @@ -776,9 +790,8 @@ class AsyncProcess { } } } - logAndResubmit(initialActions, location, toReplay, numAttempt, rsActions.size(), - t, errorsByServer); + t, errorsByServer, callable); } /** @@ -788,7 +801,8 @@ class AsyncProcess { private void logAndResubmit(List> initialActions, HRegionLocation oldLocation, List> toReplay, int numAttempt, int failureCount, Throwable throwable, - HConnectionManager.ServerErrorTracker errorsByServer) { + HConnectionManager.ServerErrorTracker errorsByServer, + PayloadCarryingServerCallable callable) { if (toReplay.isEmpty()) { // it's either a success or a last failure if (failureCount != 0) { @@ -830,7 +844,7 @@ class AsyncProcess { return; } - submit(initialActions, toReplay, numAttempt + 1, errorsByServer); + submit(initialActions, toReplay, numAttempt + 1, errorsByServer, null, callable); } /** @@ -846,7 +860,8 @@ class AsyncProcess { HRegionLocation location, MultiResponse responses, int numAttempt, HConnectionManager.ServerErrorTracker errorsByServer, - Batch.Callback batchCallback) { + Batch.Callback batchCallback, + PayloadCarryingServerCallable callable) { assert responses != null; // Success or partial success @@ -860,17 +875,20 @@ class AsyncProcess { int failureCount = 0; boolean canRetry = true; - for (Map.Entry>> resultsForRS : + Map results = responses.getResults(); + updateStats(location.getServerName(), results); + + for (Map.Entry resultsForRS : responses.getResults().entrySet()) { boolean regionFailureRegistered = false; - for (Pair regionResult : resultsForRS.getValue()) { - Object result = regionResult.getSecond(); + for (Map.Entry regionResult : resultsForRS.getValue().result.entrySet()) { + Object result = regionResult.getValue(); // Failure: retry if it's make sense else update the errors lists if (result == null || result instanceof Throwable) { throwable = (Throwable) result; - Action correspondingAction = initialActions.get(regionResult.getFirst()); + Action 
correspondingAction = initialActions.get(regionResult.getKey()); Row row = correspondingAction.getAction(); failureCount++; if (!regionFailureRegistered) { // We're doing this once per location. @@ -889,14 +907,8 @@ class AsyncProcess { } } else { // success - if (AsyncProcess.this.hConnection.getConnectionMetrics() != null) { - AsyncProcess.this.hConnection.getConnectionMetrics(). - updateServerStats(location.getServerName(), - location.getRegionInfo().getRegionName(), result); - } - if (callback != null || batchCallback != null) { - int index = regionResult.getFirst(); + int index = regionResult.getKey(); Action correspondingAction = initialActions.get(index); Row row = correspondingAction.getAction(); if (callback != null) { @@ -940,19 +952,19 @@ class AsyncProcess { } logAndResubmit(initialActions, location, toReplay, numAttempt, failureCount, - throwable, errorsByServer); + throwable, errorsByServer, callable); } private String createLog(int numAttempt, int failureCount, int replaySize, ServerName sn, - Throwable error, long backOffTime, boolean willRetry, String startTime){ + Throwable error, long backOffTime, boolean willRetry, String startTime) { StringBuilder sb = new StringBuilder(); sb.append("#").append(id).append(", table=").append(tableName). - append(", attempt=").append(numAttempt).append("/").append(numTries).append(" "); + append(", attempt=").append(numAttempt).append("/").append(numTries).append(" "); - if (failureCount > 0 || error != null){ + if (failureCount > 0 || error != null) { sb.append("failed ").append(failureCount).append(" ops").append(", last exception: "). - append(error == null ? "null" : error); + append(error == null ? "null" : error); } else { sb.append("SUCCEEDED"); } @@ -963,7 +975,7 @@ class AsyncProcess { if (willRetry) { sb.append(", retrying after ").append(backOffTime).append(" ms"). 
- append(", replay ").append(replaySize).append(" ops."); + append(", replay ").append(replaySize).append(" ops."); } else if (failureCount > 0) { sb.append(" - FAILED, NOT RETRYING ANYMORE"); } @@ -971,6 +983,22 @@ class AsyncProcess { return sb.toString(); } + private void updateStats(ServerName server, Map results) { + boolean metrics = AsyncProcess.this.hConnection.getConnectionMetrics() != null; + boolean stats = AsyncProcess.this.hConnection.getStatisticsTracker() != null; + if (!stats && !metrics) { + return; + } + for (Map.Entry regionStats : results.entrySet()) { + byte[] regionName = regionStats.getKey(); + ClientProtos.RegionLoadStats stat = regionStats.getValue().getStat(); + ResultStatsUtil.updateStats(AsyncProcess.this.hConnection.getStatisticsTracker(), server, + regionName, stat); + ResultStatsUtil.updateStats(AsyncProcess.this.hConnection.getConnectionMetrics(), + server, regionName, stat); + } + } + /** * Waits for another task to finish. * @param currentNumberOfTask - the number of task finished when calling the method. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index d281916..9fd8fb3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -59,9 +59,11 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.ipc.RegionCoprocessorRpcChannel; +import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; @@ -1014,6 +1016,8 @@ public class HTable implements HTableInterface { try { do { + // before we submit, we should clear errors firstly + ap.clearErrors(); ap.submit(writeAsyncBuffer, true); } while (synchronous && !writeAsyncBuffer.isEmpty()); @@ -1053,33 +1057,46 @@ public class HTable implements HTableInterface { */ @Override public void mutateRow(final RowMutations rm) throws IOException { - RegionServerCallable callable = - new RegionServerCallable(connection, getName(), rm.getRow()) { - public Void call() throws IOException { - try { - RegionAction.Builder regionMutationBuilder = RequestConverter.buildRegionAction( - getLocation().getRegionInfo().getRegionName(), rm); - regionMutationBuilder.setAtomic(true); - MultiRequest request = - MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build(); - PayloadCarryingRpcController controller = 
rpcControllerFactory.newController(); + final RetryingTimeTracker tracker = new RetryingTimeTracker(); + PayloadCarryingServerCallable callable = + new PayloadCarryingServerCallable(connection, getName(), rm.getRow(), + rpcControllerFactory) { + @Override + public MultiResponse call() throws IOException { + tracker.start(); controller.setPriority(tableName); - ClientProtos.MultiResponse response = getStub().multi(controller, request); - ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); - if (res.hasException()) { - Throwable ex = ProtobufUtil.toException(res.getException()); - if(ex instanceof IOException) { - throw (IOException)ex; + int remainingTime = tracker.getRemainingTime(operationTimeout); + if (remainingTime == 0) { + throw new DoNotRetryIOException("Timeout for mutate row"); + } + RpcClient.setRpcTimeout(remainingTime); + try { + RegionAction.Builder regionMutationBuilder = RequestConverter.buildRegionAction( + getLocation().getRegionInfo().getRegionName(), rm); + regionMutationBuilder.setAtomic(true); + MultiRequest request = + MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build(); + ClientProtos.MultiResponse response = getStub().multi(controller, request); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if (ex instanceof IOException) { + throw (IOException) ex; + } + throw new IOException("Failed to mutate row: " + + Bytes.toStringBinary(rm.getRow()), ex); } - throw new IOException("Failed to mutate row: "+Bytes.toStringBinary(rm.getRow()), ex); + return ResponseConverter.getResults(request, response, controller.cellScanner()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); } - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); } - return null; - } - }; - rpcCallerFactory. 
newCaller().callWithRetries(callable, this.operationTimeout); + }; + ap.submitAll(rm.getMutations(), null, callable); + ap.waitUntilDone(); + if (ap.hasError()) { + throw ap.getErrors(); + } } /** @@ -1269,34 +1286,58 @@ public class HTable implements HTableInterface { public boolean checkAndMutate(final byte [] row, final byte [] family, final byte [] qualifier, final CompareOp compareOp, final byte [] value, final RowMutations rm) throws IOException { - RegionServerCallable callable = - new RegionServerCallable(connection, getName(), row) { - @Override - public Boolean call() throws IOException { - PayloadCarryingRpcController controller = rpcControllerFactory.newController(); - controller.setPriority(tableName); - try { - CompareType compareType = CompareType.valueOf(compareOp.name()); - MultiRequest request = RequestConverter.buildMutateRequest( - getLocation().getRegionInfo().getRegionName(), row, family, qualifier, - new BinaryComparator(value), compareType, rm); - ClientProtos.MultiResponse response = getStub().multi(controller, request); - ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); - if (res.hasException()) { - Throwable ex = ProtobufUtil.toException(res.getException()); - if(ex instanceof IOException) { - throw (IOException)ex; - } - throw new IOException("Failed to checkAndMutate row: "+ - Bytes.toStringBinary(rm.getRow()), ex); + final RetryingTimeTracker tracker = new RetryingTimeTracker(); + PayloadCarryingServerCallable callable = + new PayloadCarryingServerCallable(connection, getName(), rm.getRow(), + rpcControllerFactory) { + @Override + public MultiResponse call() throws IOException { + tracker.start(); + controller.setPriority(tableName); + int remainingTime = tracker.getRemainingTime(operationTimeout); + if (remainingTime == 0) { + throw new DoNotRetryIOException("Timeout for mutate row"); + } + RpcClient.setRpcTimeout(remainingTime); + try { + RegionAction.Builder regionMutationBuilder = 
RequestConverter.buildRegionAction( + getLocation().getRegionInfo().getRegionName(), rm); + regionMutationBuilder.setAtomic(true); + MultiRequest request = + MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build(); + ClientProtos.MultiResponse response = getStub().multi(controller, request); + response.getRegionActionResult(0).getResultOrException(0).getResult(); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if (ex instanceof IOException) { + throw (IOException) ex; } - return Boolean.valueOf(response.getProcessed()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); + throw new IOException("Failed to mutate row: " + + Bytes.toStringBinary(rm.getRow()), ex); } + return ResponseConverter.getResults(request, response, controller.cellScanner()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); } - }; - return rpcCallerFactory. newCaller().callWithRetries(callable, this.operationTimeout); + } + }; + /** + * Currently, we use one array to store 'processed' flag which return by server. 
+ * It is some excessive, but that its required by the framework right now + * */ + final boolean[] processed = new boolean[1]; + ap.submitAll(rm.getMutations(), new Batch.Callback() { + @Override + public void update(byte[] region, byte[] row, Object result) { + processed[0] = ((Result)result).getExists(); + } + }, callable); + ap.waitUntilDone(); + if (ap.hasError()) { + throw ap.getErrors(); + } + return processed[0]; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 2be9264..ba753db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -522,7 +522,7 @@ public class HTableMultiplexer { try { HConnectionManager.ServerErrorTracker errorsByServer = new HConnectionManager.ServerErrorTracker(1, 10); - ap.sendMultiAction(retainedActions, actionsByServer, 10, errorsByServer, null); + ap.sendMultiAction(retainedActions, actionsByServer, 10, errorsByServer, null, null); ap.waitUntilDone(); if (ap.hasError()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index fd647e1..f73356a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -50,7 +50,7 @@ import java.util.concurrent.TimeUnit; * {@link #shutdown()} to terminate the thread pools they allocate. */ @InterfaceAudience.Private -public class MetricsConnection { +public class MetricsConnection implements StatisticTrackable { /** Set this key to {@code true} to enable metrics collection of client requests. 
*/ public static final String CLIENT_SIDE_METRICS_ENABLED_KEY = "hbase.client.metrics.enable"; @@ -191,9 +191,15 @@ public class MetricsConnection { } Result result = (Result) r; ClientProtos.RegionLoadStats stats = result.getStats(); - if(stats == null){ + if (stats == null) { return; } + updateRegionStats(serverName, regionName, stats); + } + + @Override + public void updateRegionStats(ServerName serverName, byte[] regionName, + ClientProtos.RegionLoadStats stats) { String name = serverName.getServerName() + "," + Bytes.toStringBinary(regionName); ConcurrentMap rsStats = null; if (serverStats.containsKey(serverName)) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 95d3ceb..9e23491 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -21,13 +21,16 @@ package org.apache.hadoop.hbase.client; import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec; /** * A container for Result objects, grouped by regionName. 
@@ -35,10 +38,10 @@ import org.apache.hadoop.hbase.util.Pair; @InterfaceAudience.Private public class MultiResponse { - // map of regionName to list of (Results paired to the original index for that - // Result) - private Map>> results = - new TreeMap>>(Bytes.BYTES_COMPARATOR); + + // map of regionName to map of Results by the original index for that Result + private Map results = + new TreeMap(Bytes.BYTES_COMPARATOR); /** * The server can send us a failure for the region itself, instead of individual failure. @@ -56,8 +59,8 @@ public class MultiResponse { */ public int size() { int size = 0; - for (Collection c : results.values()) { - size += c.size(); + for (RegionResult result: results.values()) { + size += result.size(); } return size; } @@ -66,26 +69,12 @@ public class MultiResponse { * Add the pair to the container, grouped by the regionName * * @param regionName - * @param r * First item in the pair is the original index of the Action * (request). Second item is the Result. Result will be empty for * successful Put and Delete actions. 
*/ - public void add(byte[] regionName, Pair r) { - List> rs = results.get(regionName); - if (rs == null) { - rs = new ArrayList>(); - results.put(regionName, rs); - } - rs.add(r); - } - - public void add(byte []regionName, int originalIndex, Object resOrEx) { - add(regionName, new Pair(originalIndex, resOrEx)); - } - - public Map>> getResults() { - return results; + public void add(byte[] regionName, int originalIndex, Object resOrEx) { + getResult(regionName).addResult(originalIndex, resOrEx); } public void addException(byte []regionName, Throwable ie){ @@ -102,4 +91,42 @@ public class MultiResponse { public Map getExceptions() { return exceptions; } + + public void addStatistic(byte[] regionName, ClientProtos.RegionLoadStats stat) { + getResult(regionName).setStat(stat); + } + + private RegionResult getResult(byte[] region){ + RegionResult rs = results.get(region); + if (rs == null) { + rs = new RegionResult(); + results.put(region, rs); + } + return rs; + } + + public Map getResults(){ + return this.results; + } + + static class RegionResult{ + Map result = new HashMap(); + ClientProtos.RegionLoadStats stat; + + public void addResult(int index, Object result){ + this.result.put(index, result); + } + + public void setStat(ClientProtos.RegionLoadStats stat){ + this.stat = stat; + } + + public int size() { + return this.result.size(); + } + + public ClientProtos.RegionLoadStats getStat() { + return this.stat; + } + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index 1241e02..add7dc1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -25,6 +25,7 @@ import java.util.Map; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.DoNotRetryIOException; import 
org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; @@ -47,7 +48,7 @@ import com.google.protobuf.ServiceException; * {@link RegionServerCallable} that goes against multiple regions. * @param */ -class MultiServerCallable extends RegionServerCallable { +class MultiServerCallable extends PayloadCarryingServerCallable { private final MultiAction multiAction; private final boolean cellBlock; private RpcControllerFactory rpcFactory; @@ -55,7 +56,7 @@ class MultiServerCallable extends RegionServerCallable { MultiServerCallable(final HConnection connection, final TableName tableName, final HRegionLocation location, final RpcControllerFactory rpcFactory, final MultiAction multi) { - super(connection, tableName, null); + super(connection, tableName, null, rpcFactory); this.multiAction = multi; this.rpcFactory = rpcFactory; setLocation(location); @@ -116,8 +117,6 @@ class MultiServerCallable extends RegionServerCallable { return ResponseConverter.getResults(requestProto, responseProto, controller.cellScanner()); } - - /** * @return True if we should send data in cellblocks. This is an expensive call. Cache the * result if you can rather than call each time. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PayloadCarryingServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PayloadCarryingServerCallable.java new file mode 100644 index 0000000..24fd3c6 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PayloadCarryingServerCallable.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; + +import java.io.IOException; + +/** + * This class is used to unify HTable calls with AsyncProcess Framework. + * HTable can use AsyncProcess directly through this class. 
+ */ +@InterfaceAudience.Private +public abstract class PayloadCarryingServerCallable + extends RegionServerCallable { + protected PayloadCarryingRpcController controller; + + public PayloadCarryingServerCallable(HConnection connection, TableName tableName, byte[] row, + RpcControllerFactory rpcControllerFactory) { + super(connection, tableName, row); + this.controller = rpcControllerFactory.newController(); + } + + public void cancel() { + controller.startCancel(); + } + + public boolean isCancelled() { + return controller.isCanceled(); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java index 3398d7d..583bd62 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultStatsUtil.java @@ -58,31 +58,14 @@ public final class ResultStatsUtil { if (stats == null) { return r; } - serverStats.updateRegionStats(server, regionName, stats); + updateStats(serverStats, server, regionName, stats); return r; } - public static T updateStats(T r, ServerStatisticTracker stats, - HRegionLocation regionLocation) { - // Writes submitted using multi() will receive MultiResponses - if (r instanceof MultiResponse) { - MultiResponse mr = (MultiResponse) r; - for (Map.Entry>> e: mr.getResults().entrySet()) { - byte[] regionName = e.getKey(); - for (Pair regionResult : e.getValue()) { - Object o = regionResult.getSecond(); - if (o instanceof Result) { - Result result = (Result) o; - ClientProtos.RegionLoadStats loadStats = result.getStats(); - if (loadStats != null) { - stats.updateRegionStats(regionLocation.getServerName(), regionName, loadStats); - // Once we have stats for one region we can move on to the next - break; - } - } - } - } + public static void updateStats(StatisticTrackable tracker, ServerName server, byte[] regionName, + 
ClientProtos.RegionLoadStats stats) { + if (regionName != null && stats != null && tracker != null) { + tracker.updateRegionStats(server, regionName, stats); } - return r; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java new file mode 100644 index 0000000..c8498e0 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingTimeTracker.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +/** + * Tracks the amount of time remaining for an operation. 
+ */ +class RetryingTimeTracker { + + private long globalStartTime = -1; + + public void start() { + if (this.globalStartTime < 0) { + this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis(); + } + } + + public int getRemainingTime(int callTimeout) { + if (callTimeout <= 0) { + return 0; + } else { + if (callTimeout == Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } + int remainingTime = (int) ( + callTimeout - + (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime)); + if (remainingTime < 1) { + // If there is no time left, we're trying anyway. It's too late. + // 0 means no timeout, and it's not the intent here. So we secure both cases by + // resetting to the minimum. + remainingTime = 1; + } + return remainingTime; + } + } + + public long getStartTime() { + return this.globalStartTime; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java index dd9c725..1f83b0d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java @@ -64,13 +64,8 @@ public class RpcRetryingCallerFactory { public RpcRetryingCaller newCaller() { // We store the values in the factory instance. This way, constructing new objects // is cheap as it does not require parsing a complex structure. 
- RpcRetryingCaller caller; - if (enableBackPressure && this.stats != null) { - caller = new StatsTrackingRpcRetryingCaller(pause, retries, startLogErrorsCnt, - this.stats); - } else { - caller = new RpcRetryingCaller(pause, retries, startLogErrorsCnt); - } + RpcRetryingCaller caller = new RpcRetryingCaller(pause, retries, + startLogErrorsCnt); return caller; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java index 0c7b683..1ca9ed2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java @@ -32,11 +32,12 @@ import java.util.concurrent.ConcurrentHashMap; * Tracks the statistics for multiple regions */ @InterfaceAudience.Private -public class ServerStatisticTracker { +public class ServerStatisticTracker implements StatisticTrackable { private final Map stats = new ConcurrentHashMap(); + @Override public void updateRegionStats(ServerName server, byte[] region, ClientProtos.RegionLoadStats currentStats) { ServerStatistics stat = stats.get(server); @@ -71,4 +72,4 @@ public class ServerStatisticTracker { ServerStatistics getServerStatsForTesting(ServerName server) { return stats.get(server); } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java new file mode 100644 index 0000000..7bb49e7 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatisticTrackable.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; + +/** + * Parent interface for an object to get updates about per-region statistics. + */ +@InterfaceAudience.Private +public interface StatisticTrackable { + /** + * Update stats per region. + * */ + void updateRegionStats(ServerName server, byte[] region, ClientProtos.RegionLoadStats + stats); +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java deleted file mode 100644 index 2a5f0ec..0000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.classification.InterfaceAudience; - -import java.io.IOException; - -/** - * An {@link RpcRetryingCaller} that will update the per-region stats for the call on return, - * if stats are available - */ -@InterfaceAudience.Private -public class StatsTrackingRpcRetryingCaller extends RpcRetryingCaller { - private final ServerStatisticTracker stats; - - public StatsTrackingRpcRetryingCaller(long pause, int retries, int startLogErrorsCnt, - ServerStatisticTracker stats) { - super(pause, retries, startLogErrorsCnt); - this.stats = stats; - } - - @Override - public T callWithRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException { - T result = super.callWithRetries(callable, callTimeout); - return updateStatsAndUnwrap(result, callable); - } - - @Override - public T callWithoutRetries(RetryingCallable callable, int callTimeout) - throws IOException, RuntimeException { - T result = super.callWithRetries(callable, callTimeout); - return updateStatsAndUnwrap(result, callable); - } - - private T updateStatsAndUnwrap(T result, RetryingCallable callable) { - // don't track stats about requests that aren't to regionservers - if (!(callable instanceof RegionServerCallable)) { - return result; - } - - RegionServerCallable regionCallable = (RegionServerCallable) callable; - HRegionLocation location = regionCallable.getLocation(); - return ResultStatsUtil.updateStats(result, stats, location); - } -} \ No 
newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 15c937e..62fbb20 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -178,8 +178,15 @@ public final class ProtobufUtil { */ private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{}; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); - private final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); - private final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); + + final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); + final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); + private final static Result EMPTY_RESULT_STALE = Result.create(null, true); + private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE + = Result.create(null, true); + private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE + = Result.create(null, true); + private final static ClientProtos.Result EMPTY_RESULT_PB; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index 7772abf..f2dc18c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -86,7 +86,7 @@ public final class ResponseConverter { int requestRegionActionCount = request.getRegionActionCount(); int responseRegionActionResultCount = response.getRegionActionResultCount(); if (requestRegionActionCount != responseRegionActionResultCount) { - throw new 
IllegalStateException("Request mutation count=" + responseRegionActionResultCount + + throw new IllegalStateException("Request mutation count=" + requestRegionActionCount + " does not match response mutation result count=" + responseRegionActionResultCount); } @@ -122,21 +122,27 @@ public final class ResponseConverter { responseValue = ProtobufUtil.toException(roe.getException()); } else if (roe.hasResult()) { responseValue = ProtobufUtil.toResult(roe.getResult(), cells); - // add the load stats, if we got any - if (roe.hasLoadStats()) { - ((Result) responseValue).addResults(roe.getLoadStats()); - } } else if (roe.hasServiceResult()) { responseValue = roe.getServiceResult(); - } else { - // no result & no exception. Unexpected. - throw new IllegalStateException("No result & no exception roe=" + roe + - " for region " + actions.getRegion()); + } else{ + // Sometimes, the response is just "it was processed". Generally, this occurs for things + // like mutateRows where either we get back 'processed' (or not) and optionally some + // statistics about the regions we touched. + responseValue = response.getProcessed() ? + ProtobufUtil.EMPTY_RESULT_EXISTS_TRUE : + ProtobufUtil.EMPTY_RESULT_EXISTS_FALSE; } results.add(regionName, roe.getIndex(), responseValue); } } + if (response.hasRegionStatistics()) { + ClientProtos.MultiRegionLoadStats stats = response.getRegionStatistics(); + for (int i = 0; i < stats.getRegionCount(); i++) { + results.addStatistic(stats.getRegion(i).getValue().toByteArray(), stats.getStat(i)); + } + } + return results; } @@ -156,14 +162,11 @@ public final class ResponseConverter { * Wrap a throwable to an action result. 
* * @param r - * @param stats * @return an action result builder */ - public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r, - ClientProtos.RegionLoadStats stats) { + public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) { ResultOrException.Builder builder = ResultOrException.newBuilder(); if (r != null) builder.setResult(r); - if (stats != null) builder.setLoadStats(stats); return builder; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index f414086..caa01d9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -106,7 +106,6 @@ public class TestAsyncProcess { callback, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); } - @Override protected RpcRetryingCaller createCaller(MultiServerCallable callable) { final MultiResponse mr = createMultiResponse(callable.getLocation(), callable.getMulti(), nbMultiResponse, nbActions); @@ -153,7 +152,6 @@ public class TestAsyncProcess { } } - static MultiResponse createMultiResponse(final HRegionLocation loc, final MultiAction multi, AtomicInteger nbMultiResponse, AtomicInteger nbActions) { final MultiResponse mr = new MultiResponse(); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 805aadb..c23b77e 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -26701,6 +26701,1095 @@ public final class ClientProtos { // @@protoc_insertion_point(class_scope:RegionLoadStats) } + public interface 
MultiRegionLoadStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .RegionSpecifier region = 1; + /** + * repeated .RegionSpecifier region = 1; + */ + java.util.List + getRegionList(); + /** + * repeated .RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index); + /** + * repeated .RegionSpecifier region = 1; + */ + int getRegionCount(); + /** + * repeated .RegionSpecifier region = 1; + */ + java.util.List + getRegionOrBuilderList(); + /** + * repeated .RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index); + + // repeated .RegionLoadStats stat = 2; + /** + * repeated .RegionLoadStats stat = 2; + */ + java.util.List + getStatList(); + /** + * repeated .RegionLoadStats stat = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getStat(int index); + /** + * repeated .RegionLoadStats stat = 2; + */ + int getStatCount(); + /** + * repeated .RegionLoadStats stat = 2; + */ + java.util.List + getStatOrBuilderList(); + /** + * repeated .RegionLoadStats stat = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getStatOrBuilder( + int index); + } + /** + * Protobuf type {@code MultiRegionLoadStats} + */ + public static final class MultiRegionLoadStats extends + com.google.protobuf.GeneratedMessage + implements MultiRegionLoadStatsOrBuilder { + // Use MultiRegionLoadStats.newBuilder() to construct. 
+ private MultiRegionLoadStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MultiRegionLoadStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MultiRegionLoadStats defaultInstance; + public static MultiRegionLoadStats getDefaultInstance() { + return defaultInstance; + } + + public MultiRegionLoadStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MultiRegionLoadStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + region_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry)); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + stat_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + stat_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.PARSER, extensionRegistry)); + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = java.util.Collections.unmodifiableList(region_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + stat_ = java.util.Collections.unmodifiableList(stat_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_MultiRegionLoadStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_MultiRegionLoadStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MultiRegionLoadStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MultiRegionLoadStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .RegionSpecifier region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private java.util.List region_; + /** + * repeated .RegionSpecifier region = 1; + */ + public java.util.List getRegionList() { + return region_; + } + /** + * repeated 
.RegionSpecifier region = 1; + */ + public java.util.List + getRegionOrBuilderList() { + return region_; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public int getRegionCount() { + return region_.size(); + } + /** + * repeated .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + return region_.get(index); + } + /** + * repeated .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + return region_.get(index); + } + + // repeated .RegionLoadStats stat = 2; + public static final int STAT_FIELD_NUMBER = 2; + private java.util.List stat_; + /** + * repeated .RegionLoadStats stat = 2; + */ + public java.util.List getStatList() { + return stat_; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public java.util.List + getStatOrBuilderList() { + return stat_; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public int getStatCount() { + return stat_.size(); + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getStat(int index) { + return stat_.get(index); + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getStatOrBuilder( + int index) { + return stat_.get(index); + } + + private void initFields() { + region_ = java.util.Collections.emptyList(); + stat_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < region_.size(); i++) { + output.writeMessage(1, region_.get(i)); + } + for (int i = 0; i < stat_.size(); i++) { + output.writeMessage(2, stat_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < region_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_.get(i)); + } + for (int i = 0; i < stat_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, stat_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats) obj; + + boolean result = true; + result = result && getRegionList() + .equals(other.getRegionList()); + result = result && getStatList() + .equals(other.getStatList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getRegionCount() > 0) 
{ + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegionList().hashCode(); + } + if (getStatCount() > 0) { + hash = (37 * hash) + STAT_FIELD_NUMBER; + hash = (53 * hash) + getStatList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MultiRegionLoadStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_MultiRegionLoadStats_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_MultiRegionLoadStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + getStatFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + regionBuilder_.clear(); + } + if (statBuilder_ == null) { + stat_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + statBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_MultiRegionLoadStats_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats build() { + 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats(this); + int from_bitField0_ = bitField0_; + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = java.util.Collections.unmodifiableList(region_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + if (statBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + stat_ = java.util.Collections.unmodifiableList(stat_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.stat_ = stat_; + } else { + result.stat_ = statBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.getDefaultInstance()) return this; + if (regionBuilder_ == null) { + if (!other.region_.isEmpty()) { + if (region_.isEmpty()) { + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRegionIsMutable(); + region_.addAll(other.region_); + } + onChanged(); + } + } else { + if (!other.region_.isEmpty()) { + if 
(regionBuilder_.isEmpty()) { + regionBuilder_.dispose(); + regionBuilder_ = null; + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + regionBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRegionFieldBuilder() : null; + } else { + regionBuilder_.addAllMessages(other.region_); + } + } + } + if (statBuilder_ == null) { + if (!other.stat_.isEmpty()) { + if (stat_.isEmpty()) { + stat_ = other.stat_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStatIsMutable(); + stat_.addAll(other.stat_); + } + onChanged(); + } + } else { + if (!other.stat_.isEmpty()) { + if (statBuilder_.isEmpty()) { + statBuilder_.dispose(); + statBuilder_ = null; + stat_ = other.stat_; + bitField0_ = (bitField0_ & ~0x00000002); + statBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStatFieldBuilder() : null; + } else { + statBuilder_.addAllMessages(other.stat_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .RegionSpecifier region = 1; + private java.util.List region_ = + java.util.Collections.emptyList(); + private 
void ensureRegionIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(region_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + + /** + * repeated .RegionSpecifier region = 1; + */ + public java.util.List getRegionList() { + if (regionBuilder_ == null) { + return java.util.Collections.unmodifiableList(region_); + } else { + return regionBuilder_.getMessageList(); + } + } + /** + * repeated .RegionSpecifier region = 1; + */ + public int getRegionCount() { + if (regionBuilder_ == null) { + return region_.size(); + } else { + return regionBuilder_.getCount(); + } + } + /** + * repeated .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + if (regionBuilder_ == null) { + return region_.get(index); + } else { + return regionBuilder_.getMessage(index); + } + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder setRegion( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.set(index, value); + onChanged(); + } else { + regionBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder setRegion( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.set(index, builderForValue.build()); + onChanged(); + } else { + regionBuilder_.setMessage(index, builderForValue.build()); + } 
+ return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder addRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.add(value); + onChanged(); + } else { + regionBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder addRegion( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.add(index, value); + onChanged(); + } else { + regionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder addRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.add(builderForValue.build()); + onChanged(); + } else { + regionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder addRegion( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.add(index, builderForValue.build()); + onChanged(); + } else { + regionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder addAllRegion( + java.lang.Iterable values) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + super.addAll(values, region_); + onChanged(); + } else { + regionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder clearRegion() { 
+ if (regionBuilder_ == null) { + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + regionBuilder_.clear(); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public Builder removeRegion(int index) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.remove(index); + onChanged(); + } else { + regionBuilder_.remove(index); + } + return this; + } + /** + * repeated .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder( + int index) { + return getRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + if (regionBuilder_ == null) { + return region_.get(index); } else { + return regionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .RegionSpecifier region = 1; + */ + public java.util.List + getRegionOrBuilderList() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(region_); + } + } + /** + * repeated .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder() { + return getRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + } + /** + * repeated .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder( + int index) { + return getRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + } + /** + * repeated .RegionSpecifier region = 1; + */ + public java.util.List + getRegionBuilderList() { + 
return getRegionFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + region_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // repeated .RegionLoadStats stat = 2; + private java.util.List stat_ = + java.util.Collections.emptyList(); + private void ensureStatIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + stat_ = new java.util.ArrayList(stat_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder> statBuilder_; + + /** + * repeated .RegionLoadStats stat = 2; + */ + public java.util.List getStatList() { + if (statBuilder_ == null) { + return java.util.Collections.unmodifiableList(stat_); + } else { + return statBuilder_.getMessageList(); + } + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public int getStatCount() { + if (statBuilder_ == null) { + return stat_.size(); + } else { + return statBuilder_.getCount(); + } + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats 
getStat(int index) { + if (statBuilder_ == null) { + return stat_.get(index); + } else { + return statBuilder_.getMessage(index); + } + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder setStat( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { + if (statBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatIsMutable(); + stat_.set(index, value); + onChanged(); + } else { + statBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder setStat( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder builderForValue) { + if (statBuilder_ == null) { + ensureStatIsMutable(); + stat_.set(index, builderForValue.build()); + onChanged(); + } else { + statBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder addStat(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { + if (statBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatIsMutable(); + stat_.add(value); + onChanged(); + } else { + statBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder addStat( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { + if (statBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatIsMutable(); + stat_.add(index, value); + onChanged(); + } else { + statBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder addStat( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder builderForValue) { + if (statBuilder_ == null) { + ensureStatIsMutable(); + stat_.add(builderForValue.build()); + 
onChanged(); + } else { + statBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder addStat( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder builderForValue) { + if (statBuilder_ == null) { + ensureStatIsMutable(); + stat_.add(index, builderForValue.build()); + onChanged(); + } else { + statBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder addAllStat( + java.lang.Iterable values) { + if (statBuilder_ == null) { + ensureStatIsMutable(); + super.addAll(values, stat_); + onChanged(); + } else { + statBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder clearStat() { + if (statBuilder_ == null) { + stat_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + statBuilder_.clear(); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public Builder removeStat(int index) { + if (statBuilder_ == null) { + ensureStatIsMutable(); + stat_.remove(index); + onChanged(); + } else { + statBuilder_.remove(index); + } + return this; + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder getStatBuilder( + int index) { + return getStatFieldBuilder().getBuilder(index); + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getStatOrBuilder( + int index) { + if (statBuilder_ == null) { + return stat_.get(index); } else { + return statBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public java.util.List + getStatOrBuilderList() { + if (statBuilder_ != null) { + return 
statBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(stat_); + } + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder addStatBuilder() { + return getStatFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance()); + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder addStatBuilder( + int index) { + return getStatFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance()); + } + /** + * repeated .RegionLoadStats stat = 2; + */ + public java.util.List + getStatBuilderList() { + return getStatFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder> + getStatFieldBuilder() { + if (statBuilder_ == null) { + statBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder>( + stat_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + stat_ = null; + } + return statBuilder_; + } + + // @@protoc_insertion_point(builder_scope:MultiRegionLoadStats) + } + + static { + defaultInstance = new MultiRegionLoadStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MultiRegionLoadStats) + } + public interface ResultOrExceptionOrBuilder extends 
com.google.protobuf.MessageOrBuilder { @@ -26778,31 +27867,31 @@ public final class ClientProtos { */ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResultOrBuilder getServiceResultOrBuilder(); - // optional .RegionLoadStats loadStats = 5; + // optional .RegionLoadStats loadStats = 5 [deprecated = true]; /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
      * current load on the region
      * 
*/ - boolean hasLoadStats(); + @java.lang.Deprecated boolean hasLoadStats(); /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
      * current load on the region
      * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats(); + @java.lang.Deprecated org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats(); /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
      * current load on the region
      * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder(); + @java.lang.Deprecated org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder(); } /** * Protobuf type {@code ResultOrException} @@ -27063,37 +28152,37 @@ public final class ClientProtos { return serviceResult_; } - // optional .RegionLoadStats loadStats = 5; + // optional .RegionLoadStats loadStats = 5 [deprecated = true]; public static final int LOADSTATS_FIELD_NUMBER = 5; private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats loadStats_; /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
      * current load on the region
      * 
*/ - public boolean hasLoadStats() { + @java.lang.Deprecated public boolean hasLoadStats() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
      * current load on the region
      * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats() { + @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats() { return loadStats_; } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
      * current load on the region
      * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder() { + @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder() { return loadStats_; } @@ -27973,28 +29062,28 @@ public final class ClientProtos { return serviceResultBuilder_; } - // optional .RegionLoadStats loadStats = 5; + // optional .RegionLoadStats loadStats = 5 [deprecated = true]; private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats loadStats_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder> loadStatsBuilder_; /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public boolean hasLoadStats() { + @java.lang.Deprecated public boolean hasLoadStats() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats() { + @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats() { if (loadStatsBuilder_ == null) { return loadStats_; } else { @@ -28002,13 +29091,13 @@ public final class ClientProtos { } } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public Builder setLoadStats(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { + @java.lang.Deprecated public Builder setLoadStats(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { if (loadStatsBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -28022,13 +29111,13 @@ public final class ClientProtos { return this; } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public Builder setLoadStats( + @java.lang.Deprecated public Builder setLoadStats( org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder builderForValue) { if (loadStatsBuilder_ == null) { loadStats_ = builderForValue.build(); @@ -28040,13 +29129,13 @@ public final class ClientProtos { return this; } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public Builder mergeLoadStats(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { + @java.lang.Deprecated public Builder mergeLoadStats(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { if (loadStatsBuilder_ == null) { if (((bitField0_ & 0x00000010) == 0x00000010) && loadStats_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance()) { @@ -28063,13 +29152,13 @@ public final class ClientProtos { return this; } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public Builder clearLoadStats() { + @java.lang.Deprecated public Builder clearLoadStats() { if (loadStatsBuilder_ == null) { loadStats_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance(); onChanged(); @@ -28080,25 +29169,25 @@ public final class ClientProtos { return this; } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder getLoadStatsBuilder() { + @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder getLoadStatsBuilder() { bitField0_ |= 0x00000010; onChanged(); return getLoadStatsFieldBuilder().getBuilder(); } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder() { + @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder() { if (loadStatsBuilder_ != null) { return loadStatsBuilder_.getMessageOrBuilder(); } else { @@ -28106,7 +29195,7 @@ public final class ClientProtos { } } /** - * optional .RegionLoadStats loadStats = 5; + * optional .RegionLoadStats loadStats = 5 [deprecated = true]; * *
        * current load on the region
@@ -30234,6 +31323,20 @@ public final class ClientProtos {
      * 
*/ boolean getProcessed(); + + // optional .MultiRegionLoadStats regionStatistics = 3; + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + boolean hasRegionStatistics(); + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats getRegionStatistics(); + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder getRegionStatisticsOrBuilder(); } /** * Protobuf type {@code MultiResponse} @@ -30299,6 +31402,19 @@ public final class ClientProtos { processed_ = input.readBool(); break; } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionStatistics_.toBuilder(); + } + regionStatistics_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionStatistics_); + regionStatistics_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -30402,9 +31518,32 @@ public final class ClientProtos { return processed_; } + // optional .MultiRegionLoadStats regionStatistics = 3; + public static final int REGIONSTATISTICS_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats regionStatistics_; + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public boolean hasRegionStatistics() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats getRegionStatistics() { + return regionStatistics_; + } + /** + * optional 
.MultiRegionLoadStats regionStatistics = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder getRegionStatisticsOrBuilder() { + return regionStatistics_; + } + private void initFields() { regionActionResult_ = java.util.Collections.emptyList(); processed_ = false; + regionStatistics_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -30417,6 +31556,12 @@ public final class ClientProtos { return false; } } + if (hasRegionStatistics()) { + if (!getRegionStatistics().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -30430,6 +31575,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(2, processed_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(3, regionStatistics_); + } getUnknownFields().writeTo(output); } @@ -30447,6 +31595,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeBoolSize(2, processed_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, regionStatistics_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -30477,6 +31629,11 @@ public final class ClientProtos { result = result && (getProcessed() == other.getProcessed()); } + result = result && (hasRegionStatistics() == other.hasRegionStatistics()); + if (hasRegionStatistics()) { + result = result && getRegionStatistics() + .equals(other.getRegionStatistics()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -30498,6 +31655,10 @@ public final class ClientProtos { hash = (37 * hash) + PROCESSED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getProcessed()); } + if 
(hasRegionStatistics()) { + hash = (37 * hash) + REGIONSTATISTICS_FIELD_NUMBER; + hash = (53 * hash) + getRegionStatistics().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -30600,6 +31761,7 @@ public final class ClientProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionActionResultFieldBuilder(); + getRegionStatisticsFieldBuilder(); } } private static Builder create() { @@ -30616,6 +31778,12 @@ public final class ClientProtos { } processed_ = false; bitField0_ = (bitField0_ & ~0x00000002); + if (regionStatisticsBuilder_ == null) { + regionStatistics_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.getDefaultInstance(); + } else { + regionStatisticsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -30657,6 +31825,14 @@ public final class ClientProtos { to_bitField0_ |= 0x00000001; } result.processed_ = processed_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + if (regionStatisticsBuilder_ == null) { + result.regionStatistics_ = regionStatistics_; + } else { + result.regionStatistics_ = regionStatisticsBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -30702,6 +31878,9 @@ public final class ClientProtos { if (other.hasProcessed()) { setProcessed(other.getProcessed()); } + if (other.hasRegionStatistics()) { + mergeRegionStatistics(other.getRegionStatistics()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -30713,6 +31892,12 @@ public final class ClientProtos { return false; } } + if (hasRegionStatistics()) { + if (!getRegionStatistics().isInitialized()) { + + return false; + } + } return true; } @@ -31024,6 +32209,123 @@ public final class ClientProtos { return this; } + // optional .MultiRegionLoadStats regionStatistics = 3; + private 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats regionStatistics_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder> regionStatisticsBuilder_; + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public boolean hasRegionStatistics() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats getRegionStatistics() { + if (regionStatisticsBuilder_ == null) { + return regionStatistics_; + } else { + return regionStatisticsBuilder_.getMessage(); + } + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public Builder setRegionStatistics(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats value) { + if (regionStatisticsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionStatistics_ = value; + onChanged(); + } else { + regionStatisticsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public Builder setRegionStatistics( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder builderForValue) { + if (regionStatisticsBuilder_ == null) { + regionStatistics_ = builderForValue.build(); + onChanged(); + } else { + regionStatisticsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public Builder 
mergeRegionStatistics(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats value) { + if (regionStatisticsBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + regionStatistics_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.getDefaultInstance()) { + regionStatistics_ = + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.newBuilder(regionStatistics_).mergeFrom(value).buildPartial(); + } else { + regionStatistics_ = value; + } + onChanged(); + } else { + regionStatisticsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public Builder clearRegionStatistics() { + if (regionStatisticsBuilder_ == null) { + regionStatistics_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.getDefaultInstance(); + onChanged(); + } else { + regionStatisticsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder getRegionStatisticsBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getRegionStatisticsFieldBuilder().getBuilder(); + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder getRegionStatisticsOrBuilder() { + if (regionStatisticsBuilder_ != null) { + return regionStatisticsBuilder_.getMessageOrBuilder(); + } else { + return regionStatistics_; + } + } + /** + * optional .MultiRegionLoadStats regionStatistics = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder, 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder> + getRegionStatisticsFieldBuilder() { + if (regionStatisticsBuilder_ == null) { + regionStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder>( + regionStatistics_, + getParentForChildren(), + isClean()); + regionStatistics_ = null; + } + return regionStatisticsBuilder_; + } + // @@protoc_insertion_point(builder_scope:MultiResponse) } @@ -31824,6 +33126,11 @@ public final class ClientProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionLoadStats_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_MultiRegionLoadStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MultiRegionLoadStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_ResultOrException_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -31938,30 +33245,34 @@ public final class ClientProtos { "\022\016\n\006atomic\030\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007.Actio" + "n\"c\n\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001" + "(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compa" + - "ctionPressure\030\003 \001(\005:\0010\"\266\001\n\021ResultOrExcep" + - "tion\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Re" + - "sult\022!\n\texception\030\003 \001(\0132\016.NameBytesPair\022" + - "1\n\016service_result\030\004 \001(\0132\031.CoprocessorSer", - "viceResult\022#\n\tloadStats\030\005 \001(\0132\020.RegionLo" + - 
"adStats\"f\n\022RegionActionResult\022-\n\021resultO" + - "rException\030\001 \003(\0132\022.ResultOrException\022!\n\t" + - "exception\030\002 \001(\0132\016.NameBytesPair\"f\n\014Multi" + - "Request\022#\n\014regionAction\030\001 \003(\0132\r.RegionAc" + - "tion\022\022\n\nnonceGroup\030\002 \001(\004\022\035\n\tcondition\030\003 " + - "\001(\0132\n.Condition\"S\n\rMultiResponse\022/\n\022regi" + - "onActionResult\030\001 \003(\0132\023.RegionActionResul" + - "t\022\021\n\tprocessed\030\002 \001(\0102\205\003\n\rClientService\022 " + - "\n\003Get\022\013.GetRequest\032\014.GetResponse\022)\n\006Muta", - "te\022\016.MutateRequest\032\017.MutateResponse\022#\n\004S" + - "can\022\014.ScanRequest\032\r.ScanResponse\022>\n\rBulk" + - "LoadHFile\022\025.BulkLoadHFileRequest\032\026.BulkL" + - "oadHFileResponse\022F\n\013ExecService\022\032.Coproc" + - "essorServiceRequest\032\033.CoprocessorService" + - "Response\022R\n\027ExecRegionServerService\022\032.Co" + - "processorServiceRequest\032\033.CoprocessorSer" + - "viceResponse\022&\n\005Multi\022\r.MultiRequest\032\016.M" + - "ultiResponseBB\n*org.apache.hadoop.hbase." 
+ - "protobuf.generatedB\014ClientProtosH\001\210\001\001\240\001\001" + "ctionPressure\030\003 \001(\005:\0010\"X\n\024MultiRegionLoa" + + "dStats\022 \n\006region\030\001 \003(\0132\020.RegionSpecifier" + + "\022\036\n\004stat\030\002 \003(\0132\020.RegionLoadStats\"\272\001\n\021Res" + + "ultOrException\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030", + "\002 \001(\0132\007.Result\022!\n\texception\030\003 \001(\0132\016.Name" + + "BytesPair\0221\n\016service_result\030\004 \001(\0132\031.Copr" + + "ocessorServiceResult\022\'\n\tloadStats\030\005 \001(\0132" + + "\020.RegionLoadStatsB\002\030\001\"f\n\022RegionActionRes" + + "ult\022-\n\021resultOrException\030\001 \003(\0132\022.ResultO" + + "rException\022!\n\texception\030\002 \001(\0132\016.NameByte" + + "sPair\"f\n\014MultiRequest\022#\n\014regionAction\030\001 " + + "\003(\0132\r.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022\035" + + "\n\tcondition\030\003 \001(\0132\n.Condition\"\204\001\n\rMultiR" + + "esponse\022/\n\022regionActionResult\030\001 \003(\0132\023.Re", + "gionActionResult\022\021\n\tprocessed\030\002 \001(\010\022/\n\020r" + + "egionStatistics\030\003 \001(\0132\025.MultiRegionLoadS" + + "tats2\205\003\n\rClientService\022 \n\003Get\022\013.GetReque" + + "st\032\014.GetResponse\022)\n\006Mutate\022\016.MutateReque" + + "st\032\017.MutateResponse\022#\n\004Scan\022\014.ScanReques" + + "t\032\r.ScanResponse\022>\n\rBulkLoadHFile\022\025.Bulk" + + "LoadHFileRequest\032\026.BulkLoadHFileResponse" + + "\022F\n\013ExecService\022\032.CoprocessorServiceRequ" + + "est\032\033.CoprocessorServiceResponse\022R\n\027Exec" + + "RegionServerService\022\032.CoprocessorService", + "Request\032\033.CoprocessorServiceResponse\022&\n\005" + + "Multi\022\r.MultiRequest\032\016.MultiResponseBB\n*" + + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\014ClientProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -32124,30 +33435,36 @@ public final class ClientProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoadStats_descriptor, new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", "CompactionPressure", }); - internal_static_ResultOrException_descriptor = + internal_static_MultiRegionLoadStats_descriptor = getDescriptor().getMessageTypes().get(23); + internal_static_MultiRegionLoadStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MultiRegionLoadStats_descriptor, + new java.lang.String[] { "Region", "Stat", }); + internal_static_ResultOrException_descriptor = + getDescriptor().getMessageTypes().get(24); internal_static_ResultOrException_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ResultOrException_descriptor, new java.lang.String[] { "Index", "Result", "Exception", "ServiceResult", "LoadStats", }); internal_static_RegionActionResult_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(25); internal_static_RegionActionResult_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionActionResult_descriptor, new java.lang.String[] { "ResultOrException", "Exception", }); internal_static_MultiRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(26); internal_static_MultiRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiRequest_descriptor, new java.lang.String[] { "RegionAction", "NonceGroup", "Condition", }); internal_static_MultiResponse_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(27); internal_static_MultiResponse_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiResponse_descriptor, - new java.lang.String[] { "RegionActionResult", "Processed", }); + new java.lang.String[] { "RegionActionResult", "Processed", "RegionStatistics", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 0526d6c..9a45032 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -358,6 +358,11 @@ message RegionLoadStats { optional int32 compactionPressure = 3 [default = 0]; } +message MultiRegionLoadStats{ + repeated RegionSpecifier region = 1; + repeated RegionLoadStats stat = 2; +} + /** * Either a Result or an Exception NameBytesPair (keyed by * exception name whose value is the exception stringified) @@ -372,7 +377,7 @@ message ResultOrException { // result if this was a coprocessor service call optional CoprocessorServiceResult service_result = 4; // current load on the region - optional RegionLoadStats loadStats = 5; + optional RegionLoadStats loadStats = 5 [deprecated=true]; } /** @@ -401,6 +406,7 @@ message MultiResponse { repeated RegionActionResult regionActionResult = 1; // used for mutate to indicate processed only optional bool processed = 2; + optional MultiRegionLoadStats regionStatistics = 3; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 8674308..aa88257 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -5197,9 +5197,9 @@ public class HRegion implements HeapSize { // , Writable{ } /** - * @return the current load statistics for the the region + * @return statistics about the current load of the region */ - public ClientProtos.RegionLoadStats getRegionStats() { + public 
ClientProtos.RegionLoadStats getLoadStatistics() { if (!regionStatsEnabled) { return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 65e52b5..b0d2873 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -3619,13 +3619,15 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa MultiResponse.Builder responseBuilder = MultiResponse.newBuilder(); RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder(); Boolean processed = null; - + Map regionStats = + new HashMap(request.getRegionActionCount()); for (RegionAction regionAction : request.getRegionActionList()) { this.requestCount.add(regionAction.getActionCount()); HRegion region; regionActionResultBuilder.clear(); + RegionSpecifier regionSpecifier = regionAction.getRegion(); try { - region = getRegion(regionAction.getRegion()); + region = getRegion(regionSpecifier); } catch (IOException e) { rpcServer.getMetrics().exception(e); regionActionResultBuilder.setException(ResponseConverter.buildException(e)); @@ -3649,15 +3651,10 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator()); processed = checkAndRowMutate(region, regionAction.getActionList(), - cellScanner, row, family, qualifier, compareOp, comparator); + cellScanner, row, family, qualifier, compareOp, comparator, regionActionResultBuilder); } else { - ClientProtos.RegionLoadStats stats = mutateRows(region, regionAction.getActionList(), - cellScanner); - // add the stats to the request - if (stats != null) { - responseBuilder.addRegionActionResult(RegionActionResult.newBuilder() - 
.addResultOrException(ResultOrException.newBuilder().setLoadStats(stats))); - } + mutateRows(region, regionAction.getActionList(), + cellScanner, regionActionResultBuilder); processed = Boolean.TRUE; } } catch (IOException e) { @@ -3671,12 +3668,23 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa regionActionResultBuilder, cellsToReturn, nonceGroup); } responseBuilder.addRegionActionResult(regionActionResultBuilder.build()); + ClientProtos.RegionLoadStats regionLoadStats = ((HRegion)region).getLoadStatistics(); + if(regionLoadStats != null) { + regionStats.put(regionSpecifier, regionLoadStats); + } } // Load the controller with the Cells to return. if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) { controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn)); } if (processed != null) responseBuilder.setProcessed(processed); + ClientProtos.MultiRegionLoadStats.Builder builder = + ClientProtos.MultiRegionLoadStats.newBuilder(); + for(Entry stat: regionStats.entrySet()){ + builder.addRegion(stat.getKey()); + builder.addStat(stat.getValue()); + } + responseBuilder.setRegionStatistics(builder); return responseBuilder.build(); } @@ -4677,7 +4685,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa case SUCCESS: builder.addResultOrException(getResultOrException( - ClientProtos.Result.getDefaultInstance(), index, region.getRegionStats())); + ClientProtos.Result.getDefaultInstance(), index)); break; } } @@ -4696,8 +4704,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa } private static ResultOrException getResultOrException(final ClientProtos.Result r, - final int index, final ClientProtos.RegionLoadStats stats) { - return getResultOrException(ResponseConverter.buildActionResult(r, stats), index); + final int index) { + return getResultOrException(ResponseConverter.buildActionResult(r), index); } private static ResultOrException 
getResultOrException(final Exception e, final int index) { @@ -4769,12 +4777,16 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa * @throws IOException */ protected ClientProtos.RegionLoadStats mutateRows(final HRegion region, - final List actions, final CellScanner cellScanner) + final List actions, final CellScanner cellScanner, + RegionActionResult.Builder builder) throws IOException { if (!region.getRegionInfo().isMetaTable()) { cacheFlusher.reclaimMemStoreMemory(); } RowMutations rm = null; + int i = 0; + ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder = + ClientProtos.ResultOrException.newBuilder(); for (ClientProtos.Action action: actions) { if (action.hasGet()) { throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + @@ -4794,9 +4806,15 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa default: throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name()); } + // To unify the response format with doNonAtomicRegionMutation and read through client's + // AsyncProcess we have to add an empty result instance per operation + resultOrExceptionOrBuilder.clear(); + resultOrExceptionOrBuilder.setIndex(i++); + builder.addResultOrException( + resultOrExceptionOrBuilder.build()); } region.mutateRow(rm); - return region.getRegionStats(); + return region.getLoadStatistics(); } /** @@ -4814,11 +4832,15 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa */ private boolean checkAndRowMutate(final HRegion region, final List actions, final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, ByteArrayComparable comparator) throws IOException { + CompareOp compareOp, ByteArrayComparable comparator, + RegionActionResult.Builder builder) throws IOException { if (!region.getRegionInfo().isMetaTable()) { cacheFlusher.reclaimMemStoreMemory(); } RowMutations rm = null; + int i = 0; 
+ ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder = + ClientProtos.ResultOrException.newBuilder(); for (ClientProtos.Action action: actions) { if (action.hasGet()) { throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + @@ -4838,8 +4860,15 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa default: throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name()); } + // To unify the response format with doNonAtomicRegionMutation and read through client's + // AsyncProcess we have to add an empty result instance per operation + resultOrExceptionOrBuilder.clear(); + resultOrExceptionOrBuilder.setIndex(i++); + builder.addResultOrException( + resultOrExceptionOrBuilder.build()); } - return region.checkAndRowMutate(row, family, qualifier, compareOp, comparator, rm, Boolean.TRUE); + return region.checkAndRowMutate(row, family, qualifier, compareOp, + comparator, rm, Boolean.TRUE); } private static class MovedRegionInfo { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java index e22f072..591474f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java @@ -53,7 +53,7 @@ public class TestCheckAndMutate { } @Test - public void testCheckAndMutate() throws Exception { + public void testCheckAndMutate() throws Throwable { final TableName tableName = TableName.valueOf("TestPutWithDelete"); final byte[] rowKey = Bytes.toBytes("12345"); final byte[] family = Bytes.toBytes("cf"); @@ -108,7 +108,12 @@ public class TestCheckAndMutate { table.checkAndMutate(rowKey, family, Bytes.toBytes("A"), CompareFilter.CompareOp.EQUAL, Bytes.toBytes("a"), rm); fail("Expected NoSuchColumnFamilyException"); - } catch(NoSuchColumnFamilyException e) { + } 
catch (RetriesExhaustedWithDetailsException e) { + try { + throw e.getCause(0); + } catch (NoSuchColumnFamilyException e1) { + // expected + } } } finally { table.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index 2d80fc3..09ca275 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.client; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -173,4 +174,32 @@ public class TestClientPushback { assertNotEquals("AsyncProcess did not submit the work in time", endTime.get(), 0); assertTrue("AsyncProcess did not delay long enough", endTime.get() - startTime >= backoffTime); } + + @Test + public void testMutateRowStats() throws IOException { + Configuration conf = UTIL.getConfiguration(); + HConnection conn = HConnectionManager.createConnection(conf); + HTable table = (HTable) conn.getTable(tableName); + HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); + HRegion region = rs.getOnlineRegions(TableName.valueOf(tableName)).get(0); + + RowMutations mutations = new RowMutations(Bytes.toBytes("row")); + Put p = new Put(Bytes.toBytes("row")); + p.add(family, qualifier, Bytes.toBytes("value2")); + mutations.add(p); + table.mutateRow(mutations); + + ServerStatisticTracker stats = conn.getStatisticsTracker(); + assertNotNull( "No stats configured for the client!", stats); + // get the names so we can query the stats + ServerName server = rs.getServerName(); + byte[] regionName = region.getRegionInfo().getRegionName(); + + // check to see we found some load on the memstore + ServerStatistics serverStats = stats.getServerStatsForTesting(server); + 
ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName); + + assertNotNull(regionStats); + assertTrue(regionStats.getMemstoreLoadPercent() > 0); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 27b89c5..ce958d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4532,7 +4532,13 @@ public class TestFromClientSide { arm.add(p); t.mutateRow(arm); fail("Expected NoSuchColumnFamilyException"); - } catch(NoSuchColumnFamilyException e) { + } catch(RetriesExhaustedWithDetailsException e) { + for(Throwable rootCause: e.getCauses()){ + if(rootCause instanceof NoSuchColumnFamilyException){ + return; + } + } + throw e; } } -- 1.9.3 (Apple Git-50)