diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index b31e5e3..b888a8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -21,13 +21,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.io.InterruptedIOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -35,6 +29,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -582,65 +577,121 @@ class AsyncProcess { * @param numAttempt the attempt number. */ private void sendMultiAction( - Map> actionsByServer, final int numAttempt) { + Map> actionsByServer, int numAttempt) { // Run the last item on the same thread if we are already on a send thread. // We hope most of the time it will be the only item, so we can cut down on threads. int reuseThreadCountdown = (numAttempt > 1) ? actionsByServer.size() : Integer.MAX_VALUE; for (Map.Entry> e : actionsByServer.entrySet()) { - final ServerName server = e.getKey(); - final MultiAction multiAction = e.getValue(); + ServerName server = e.getKey(); + MultiAction multiAction = e.getValue(); incTaskCounters(multiAction.getRegions(), server); - Runnable runnable = Trace.wrap("AsyncProcess.sendMultiAction", new Runnable() { - @Override - public void run() { - MultiResponse res; - try { - MultiServerCallable callable = createCallable(server, tableName, multiAction); - try { - res = createCaller(callable).callWithoutRetries(callable, operationTimeout); - } catch (IOException e) { - // The service itself failed . It may be an error coming from the communication - // layer, but, as well, a functional error raised by the server. - receiveGlobalFailure(multiAction, server, numAttempt, e); - return; - } catch (Throwable t) { - // This should not happen. Let's log & retry anyway. - LOG.error("#" + id + ", Caught throwable while calling. This is unexpected." + - " Retrying. Server is " + server.getServerName() + ", tableName=" + tableName, t); - receiveGlobalFailure(multiAction, server, numAttempt, t); - return; - } + Collection runnables = getNewMultiActionRunnable(server, multiAction, + numAttempt); + // make sure we correctly count the number of runnables before we try to reuse the send + // thread, in case we had to split the request into different runnables because of backoff + if (runnables.size() > reuseThreadCountdown) { + reuseThreadCountdown = runnables.size(); + } - // Normal case: we received an answer from the server, and it's not an exception. - receiveMultiAction(multiAction, server, res, numAttempt); - } catch (Throwable t) { - // Something really bad happened. We are on the send thread that will now die.
- LOG.error("Internal AsyncProcess #" + id + " error for " - + tableName + " processing for " + server, t); - throw new RuntimeException(t); - } finally { + // run all the runnables + for (Runnable runnable : runnables) { + --reuseThreadCountdown; + if (reuseThreadCountdown == 0) { + runnable.run(); + } else { + try { + pool.submit(runnable); + } catch (RejectedExecutionException ree) { + // This should never happen. But as the pool is provided by the end user, let's secure + // this a little. decTaskCounters(multiAction.getRegions(), server); + LOG.warn("#" + id + ", the task was rejected by the pool. This is unexpected." + + " Server is " + server.getServerName(), ree); + // We're likely to fail again, but this will increment the attempt counter, so it will + // finish. + receiveGlobalFailure(multiAction, server, numAttempt, ree); } } - }); - --reuseThreadCountdown; - if (reuseThreadCountdown == 0) { - runnable.run(); + } + } + } + + private Collection getNewMultiActionRunnable(ServerName server, + MultiAction multiAction, + int numAttempt) { + // no stats to manage, just do the standard action + if (hConnection.getStatisticsTracker() == null) { + return Collections.singletonList(getMultiRunnableForActions(server, multiAction, + numAttempt)); + } + + ServerStatisticTracker stats = hConnection.getStatisticsTracker(); + + // group the actions by the amount of delay + Map actions = new HashMap(multiAction + .size()); + + // split up the actions + for (Map.Entry>> e : multiAction.actions.entrySet()) { + Long backoff = stats.getBackoff(server, e.getKey()); + DelayingRunner runner = actions.get(backoff); + if (runner == null) { + actions.put(backoff, new DelayingRunner(backoff, e)); } else { + runner.add(e); + } + } + + List toReturn = new ArrayList(actions.size()); + for (DelayingRunner runner : actions.values()) { + Runnable run = getMultiRunnableForActions(server, runner.getActions(), numAttempt); + // use a delay runner only if we need to sleep for some time + if (runner.getSleepTime() > 0) { + runner.setRunner(run); + run = runner; + } + toReturn.add(run); + + } + return toReturn; + } + + private Runnable getMultiRunnableForActions(final ServerName server, + final MultiAction multiAction, + final int numAttempt) { + return Trace.wrap("AsyncProcess.sendMultiAction", new Runnable() { + @Override + public void run() { + MultiResponse res; try { - pool.submit(runnable); - } catch (RejectedExecutionException ree) { - // This should never happen. But as the pool is provided by the end user, let's secure - // this a little. + MultiServerCallable callable = createCallable(server, tableName, multiAction); + try { + res = createCaller(callable).callWithoutRetries(callable, operationTimeout); + } catch (IOException e) { + // The service itself failed. It may be an error coming from the communication + // layer, but, as well, a functional error raised by the server. + receiveGlobalFailure(multiAction, server, numAttempt, e); + return; + } catch (Throwable t) { + // This should not happen. Let's log & retry anyway. + LOG.error("#" + id + ", Caught throwable while calling. This is unexpected." + + " Retrying. Server is " + server.getServerName() + ", tableName=" + tableName, t); + receiveGlobalFailure(multiAction, server, numAttempt, t); + return; + } + + // Normal case: we received an answer from the server, and it's not an exception. + receiveMultiAction(multiAction, server, res, numAttempt); + } catch (Throwable t) { + // Something really bad happened. We are on the send thread that will now die.
+ LOG.error("Internal AsyncProcess #" + id + " error for " + + tableName + " processing for " + server, t); + throw new RuntimeException(t); + } finally { decTaskCounters(multiAction.getRegions(), server); - LOG.warn("#" + id + ", the task was rejected by the pool. This is unexpected." + - " Server is " + server.getServerName(), ree); - // We're likely to fail again, but this will increment the attempt counter, so it will - // finish. - receiveGlobalFailure(multiAction, server, numAttempt, ree); } } - } + }); } /** @@ -805,6 +856,13 @@ class AsyncProcess { toReplay.add(sentAction); } } else { + // update the stats about the region, if its a user table. We don't want to slow down + // updates to meta tables, especially from internal updates (master, etc). + if (hConnection.getStatisticsTracker() != null) { + result = ResultWithStats.updateStatsAndUnwrap(result, + hConnection.getStatisticsTracker(), server, regionName); + } + if (callback != null) { try { //noinspection unchecked diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index fc5c347..ff00d29 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -211,4 +211,9 @@ interface ClusterConnection extends HConnection { * @return Default AsyncProcess associated with this connection. */ AsyncProcess getAsyncProcess(); + + /** + * @return the current statistics tracker associated with this connection + */ + ServerStatisticTracker getStatisticsTracker(); } \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java index 2762579..94cc587 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; //NOTE: DO NOT make this class public. It was made package-private on purpose. 
class ConnectionAdapter implements ClusterConnection { - private final ClusterConnection wrappedConnection; + protected final ClusterConnection wrappedConnection; public ConnectionAdapter(HConnection c) { wrappedConnection = (ClusterConnection)c; @@ -411,4 +411,9 @@ class ConnectionAdapter implements ClusterConnection { public AsyncProcess getAsyncProcess() { return wrappedConnection.getAsyncProcess(); } + + @Override + public ServerStatisticTracker getStatisticsTracker() { + return wrappedConnection.getStatisticsTracker(); + } } \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index a1848dd..dc8fb69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -467,6 +467,8 @@ class ConnectionManager { final int rpcTimeout; private NonceGenerator nonceGenerator = null; private final AsyncProcess asyncProcess; + // single tracker per connection + private final ServerStatisticTracker stats; private volatile boolean closed; private volatile boolean aborted; @@ -595,6 +597,7 @@ class ConnectionManager { this.nonceGenerator = new NoNonceGenerator(); } this.asyncProcess = createAsyncProcess(this.conf); + stats = ServerStatisticTracker.create(conf); } @Override @@ -2201,7 +2204,8 @@ class ConnectionManager { protected AsyncProcess createAsyncProcess(Configuration conf) { // No default pool available. return new AsyncProcess( - this, conf, this.batchPool, RpcRetryingCallerFactory.instantiate(conf), false); + this, conf, this.batchPool, RpcRetryingCallerFactory.instantiate(conf, + this.getStatisticsTracker()), false); } @Override @@ -2209,6 +2213,11 @@ class ConnectionManager { return asyncProcess; } + @Override + public ServerStatisticTracker getStatisticsTracker() { + return this.stats; + } + /* * Return the number of cached region for a table. It will only be called * from a unit test. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java new file mode 100644 index 0000000..3af95d0 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java @@ -0,0 +1,98 @@ +package org.apache.hadoop.hbase.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.util.Sleeper; + +import java.util.List; +import java.util.Map; + +/** + * A wrapper for a runnable for a group of actions for a single regionserver. + * <p>
+ * This can be used to build up the actions that should be taken, and then run them all at + * once after the required backoff delay has passed. + * </p>
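+ * <p> + * A rough usage sketch, mirroring how {@code AsyncProcess} drives this class (the runnable + * and backoff values here are illustrative, whatever the caller grouped together): + * <pre> + * DelayingRunner runner = new DelayingRunner(backoffMillis, actionsEntry); + * runner.setRunner(sendRunnable); // the actual send logic + * pool.submit(runner); // sleeps backoffMillis, then runs sendRunnable + * </pre> + * </p>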
+ */ +public class DelayingRunner implements Runnable { + private static final Log LOG = LogFactory.getLog(DelayingRunner.class); + + private static final long MINIMAL_DELTA_FOR_LOGGING = 10000; + + private final Object sleepLock = new Object(); + private boolean triggerWake = false; + private long sleepTime; + MultiAction actions = new MultiAction(); + private Runnable runnable; + + public DelayingRunner(long sleepTime, Map.Entry>> e) { + this.sleepTime = sleepTime; + add(e); + } + + public void setRunner(Runnable runner) { + this.runnable = runner; + } + + @Override + public void run() { + if (!sleep()) { + LOG.warn( + "Interrupted while sleeping for expected sleep time " + sleepTime + " ms"); + } + //TODO maybe we should consider switching to a listenableFuture for the actual callable and + // then handling the results/errors as callbacks. That way we can decrement outstanding tasks + // even if we get interrupted here. For now, though, we still need to run so we can decrement + // the outstanding tasks. + this.runnable.run(); + } + + /** + * Sleep for an expected amount of time. + * <p>
+ * This is nearly a copy of what {@link Sleeper} does, but with the ability to know if you got + * interrupted while sleeping. + * </p>
+ * + * @return true if the sleep completed entirely and successfully, + * false if the sleep was interrupted. + */ + private boolean sleep() { + long now = System.currentTimeMillis(); + long startTime = now; + long waitTime = sleepTime; + while (waitTime > 0) { + long woke = -1; + try { + synchronized (sleepLock) { + if (triggerWake) break; + sleepLock.wait(waitTime); + } + woke = System.currentTimeMillis(); + long slept = woke - now; + if (slept - waitTime > MINIMAL_DELTA_FOR_LOGGING) { + LOG.warn("We slept " + slept + "ms instead of " + waitTime + + "ms, this is likely due to a long " + + "garbage collecting pause and it's usually bad"); + } + } catch (InterruptedException iex) { + return false; + } + // Recalculate waitTime. + woke = (woke == -1) ? System.currentTimeMillis() : woke; + waitTime = waitTime - (woke - startTime); + } + return true; + } + + public void add(Map.Entry>> e) { + actions.add(e.getKey(), e.getValue()); + } + + public MultiAction getActions() { + return actions; + } + + public long getSleepTime() { + return sleepTime; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index c6b11fd..504bca0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -38,7 +38,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseIOException; @@ -127,7 +126,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse; @@ -229,7 +227,8 @@ public class HBaseAdmin implements Admin { this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); + this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf, + connection.getStatisticsTracker()); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 49d9354..c52b416 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -43,7 +43,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -361,7 +360,8 @@ public class HTable implements HTableInterface { HConstants.HBASE_CLIENT_SCANNER_CACHING, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(configuration); + this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(configuration, + connection.getStatisticsTracker()); // puts need to track errors globally due to how the APIs currently work. ap = new AsyncProcess(connection, configuration, pool, rpcCallerFactory, true); multiAp = this.connection.getAsyncProcess(); @@ -1762,7 +1762,7 @@ public class HTable implements HTableInterface { Object[] results = new Object[execs.size()]; AsyncProcess asyncProcess = new AsyncProcess(connection, configuration, pool, - RpcRetryingCallerFactory.instantiate(configuration), true); + RpcRetryingCallerFactory.instantiate(configuration, connection.getStatisticsTracker()), true); AsyncRequestFuture future = asyncProcess.submitAll(tableName, execs, new Callback() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java index 2ca24dc..279ed95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java @@ -18,11 +18,7 @@ */ package org.apache.hadoop.hbase.client; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; +import java.util.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -68,12 +64,24 @@ public final class MultiAction { * @param a */ public void add(byte[] regionName, Action a) { + add(regionName, Arrays.asList(a)); + } + + /** + * Add an Action to this container based on its regionName. If the regionName + * is wrong, the initial execution will fail, but will be automatically + * retried after looking up the correct region.
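+ * <p> + * For example, with two hypothetical actions bound for the same region: + * {@code multi.add(regionName, Arrays.asList(action1, action2));} + * </p>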
+ * + * @param regionName + * @param actionList list of actions to add for the region + */ + public void add(byte[] regionName, List> actionList) { List> rsActions = actions.get(regionName); if (rsActions == null) { rsActions = new ArrayList>(); actions.put(regionName, rsActions); } - rsActions.add(a); + rsActions.addAll(actionList); } public void setNonceGroup(long nonceGroup) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultWithStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultWithStats.java new file mode 100644 index 0000000..0342eeb --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultWithStats.java @@ -0,0 +1,67 @@ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.util.Pair; + +/** + * A {@link Result} with some statistics about the server/region status + */ +public class ResultWithStats { + + private final Result result; + private final ClientProtos.RegionLoadStats stats; + + public ResultWithStats(Result result, ClientProtos.RegionLoadStats stats) { + this.result = result; + this.stats = stats; + } + + public Result getResult() { + return result; + } + + /** + * Update the stats for the specified region if the result is an instance of {@link + * ResultWithStats} + * + * @param r object that contains the result and possibly the statistics about the region + * @param <T> type of the underlying result expected + * @return the underlying {@link Result} if the passed result is a {@link + * ResultWithStats}; otherwise, just the passed result + */ + public static T updateStatsAndUnwrap(T r, ServerStatisticTracker serverStats, + ServerName server, byte[] regionName) { + Pair results = unwrap(r); + + // update the statistics, if we can + if (results.getSecond() != null) { + ResultWithStats result = results.getSecond(); + // only update the stats if we know the region name + if (regionName != null) { + serverStats.updateRegionStats(server, regionName, result.stats); + } + } + + return results.getFirst(); + } + + public static Pair unwrap(T result) { + // early exit if it doesn't have stats + if (!(result instanceof ResultWithStats)) { + return new Pair(result, null); + } + ResultWithStats r = (ResultWithStats) result; + return new Pair((T) r.getResult(), r); + } + + public static T updateStatsAndUnwrap(T r, ServerStatisticTracker stats, + HRegionLocation regionLocation) { + byte[] regionName = null; + ServerName server = null; + if (regionLocation != null) { + server = regionLocation.getServerName(); + regionName = regionLocation.getRegionInfo().getRegionName(); + } + return updateStatsAndUnwrap(r, stats, server, regionName); + } +} \ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java index 7957cc8..286668f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.client; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -24,13 +26,19 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; /** * Factory to create an {@link RpcRetryingCaller} */ +@InterfaceAudience.Public +@InterfaceStability.Evolving public class RpcRetryingCallerFactory { - /** Configuration key for a custom {@link RpcRetryingCaller} */ + /** + * Configuration key for a custom {@link RpcRetryingCaller} + */ public static final String CUSTOM_CALLER_CONF_KEY = "hbase.rpc.callerfactory.class"; protected final Configuration conf; private final long pause; private final int retries; + private final boolean enableBackPressure; + private ServerStatisticTracker stats; public RpcRetryingCallerFactory(Configuration conf) { this.conf = conf; @@ -38,19 +46,37 @@ public class RpcRetryingCallerFactory { HConstants.DEFAULT_HBASE_CLIENT_PAUSE); retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, + HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE); + } + + /** + * Set the tracker that should be used for tracking statistics about the server + */ + public void setStatisticTracker(ServerStatisticTracker statisticTracker) { + this.stats = statisticTracker; } public RpcRetryingCaller newCaller() { // We store the values in the factory instance. This way, constructing new objects // is cheap as it does not require parsing a complex structure. + if (enableBackPressure && this.stats != null) { + return new StatsTrackingRpcRetryingCaller(pause, retries, this.stats); + } return new RpcRetryingCaller(pause, retries); } - public static RpcRetryingCallerFactory instantiate(Configuration configuration) { + public static RpcRetryingCallerFactory instantiate(Configuration configuration, + ServerStatisticTracker stats) { String rpcCallerFactoryClazz = configuration.get(RpcRetryingCallerFactory.CUSTOM_CALLER_CONF_KEY, - RpcRetryingCallerFactory.class.getName()); - return ReflectionUtils.instantiateWithCustomCtor(rpcCallerFactoryClazz, - new Class[] { Configuration.class }, new Object[] { configuration }); + RpcRetryingCallerFactory.class.getName()); + RpcRetryingCallerFactory factory = ReflectionUtils.instantiateWithCustomCtor( + rpcCallerFactoryClazz, + new Class[] { Configuration.class }, new Object[] { configuration }); + + // setting for backwards compat with existing caller factories, rather than in the ctor + factory.setStatisticTracker(stats); + return factory; } -} +} \ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java new file mode 100644 index 0000000..4cbf2a3 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java @@ -0,0 +1,80 @@ +package org.apache.hadoop.hbase.client; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy; +import org.apache.hadoop.hbase.client.backoff.ServerStatistics; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.util.ReflectionUtils; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Tracks the statistics for multiple regions + */ +public class ServerStatisticTracker { + + private static final String BACKOFF_POLICY_CLASS = + "org.apache.hadoop.hbase.client.statistics.backoff-policy"; + private final ClientBackoffPolicy backoffPolicy; + + public interface ClientBackoffPolicy { + /** + * @return the number of ms to wait on the client, based on the current statistics for + * the server and region + */ + public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats); + } + + Map stats = Collections.synchronizedMap(new HashMap()); + + public ServerStatisticTracker(Configuration conf) { + // create the backoff policy + String className = conf.get(BACKOFF_POLICY_CLASS, ExponentialClientBackoffPolicy.class.getName()); + this.backoffPolicy = ReflectionUtils.instantiateWithCustomCtor(className, + new Class[] { Configuration.class }, new Object[] { conf }); + } + + public void updateRegionStats(ServerName server, byte[] region, ClientProtos.RegionLoadStats + currentStats) { + ServerStatistics stat = stats.get(server); + // update the current stats, if we already have stats + if (stat != null) { + stat.update(region, currentStats); + return; + } + + // create a stats object and update the stats + synchronized (this) { + stat = stats.get(server); + // someone else created the stat by now, just update it + if (stat == null) { + stat = new ServerStatistics(); + stats.put(server, stat); + } + stat.update(region, currentStats); + } + } + + public Long getBackoff(ServerName server, byte[] key) { + ServerStatistics stats = this.stats.get(server); + return this.backoffPolicy.getBackoffTime(server, key, stats); + } + + public static ServerStatisticTracker create(Configuration conf) { + if (!conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, + HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE)) { + return null; + } + return new ServerStatisticTracker(conf); + } + + @VisibleForTesting + ServerStatistics getServerStatsForTesting(ServerName server) { + return stats.get(server); + } +} \ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java new file mode 100644 index 0000000..6252ac3 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/StatsTrackingRpcRetryingCaller.java @@ -0,0 +1,53 @@ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.TableName; + +import java.io.IOException; +import java.util.regex.Pattern; + +/** + * An {@link RpcRetryingCaller} that will update the per-region stats for the call on return, + * if stats are available + */ +public class StatsTrackingRpcRetryingCaller extends RpcRetryingCaller { + private final ServerStatisticTracker stats; + + public StatsTrackingRpcRetryingCaller(long pause, int retries, ServerStatisticTracker stats) { + super(pause, retries); + this.stats = stats; + } + + @Override + public T callWithRetries(RetryingCallable callable, int callTimeout) + throws IOException, RuntimeException { + T result = super.callWithRetries(callable, callTimeout); + return updateStatsAndUnwrap(result, callable); + } + + @Override + public T callWithoutRetries(RetryingCallable callable, int callTimeout) + throws IOException, RuntimeException { + T result = super.callWithoutRetries(callable, callTimeout); + return updateStatsAndUnwrap(result, callable); + } + + private T updateStatsAndUnwrap(T result, RetryingCallable callable) { + // don't track stats about requests that aren't to regionservers + if (!(callable instanceof RegionServerCallable)) { +
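// not a request to a region server (e.g. a master operation), so there are no region stats + // to record; the result may still come back wrapped, though, so hand back the unwrapped value +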
return ResultWithStats.unwrap(result).getFirst(); + } + + // multi-server callables span multiple regions, so they don't have a location, + // but they are region server callables, so we have to handle them when we process the + // result, not in here + if (callable instanceof MultiServerCallable) { + return ResultWithStats.unwrap(result).getFirst(); + } + + // update the stats for the single server callable + RegionServerCallable regionCallable = (RegionServerCallable) callable; + HRegionLocation location = regionCallable.getLocation(); + return ResultWithStats.updateStatsAndUnwrap(result, stats, location); + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java new file mode 100644 index 0000000..9178542 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java @@ -0,0 +1,46 @@ +package org.apache.hadoop.hbase.client.backoff; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.ServerStatisticTracker; + +/** + * Simple exponential backoff policy for the client that uses percent^4 times the + * max backoff to generate the backoff time. + */ +//TODO should/can we use the RetryCounter backoff tools? +public class ExponentialClientBackoffPolicy implements ServerStatisticTracker.ClientBackoffPolicy { + + private static final long ONE_MINUTE = 60 * 1000; + public static final long DEFAULT_MAX_BACKOFF = 5 * ONE_MINUTE; + public static final String MAX_BACKOFF_KEY = "org.apache.hadoop.hbase.client.backoff.max"; + private long maxBackoff; + + public ExponentialClientBackoffPolicy(Configuration conf) { + this.maxBackoff = conf.getLong(MAX_BACKOFF_KEY, DEFAULT_MAX_BACKOFF); + } + + @Override + public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats) { + // no stats for the server yet, so don't backoff + if (stats == null) { + return 0; + } + + ServerStatistics.RegionStatistics regionStats = stats.getStatsForRegion(region); + // no stats for the region yet - don't backoff + if (regionStats == null) { + return 0; + } + + // raise the percent, as a value less than 1, to the 4th power. The closer we move to 100 + // percent, the closer the base moves to 1, and the power produces the exponential curve + double percent = regionStats.getMemstoreLoadPercent() / 100.0; + double multiplier = Math.pow(percent, 4.0); + // shouldn't ever happen, but just in case something changes in the statistic data + if (multiplier > 1) { + multiplier = 1; + } + return (long) (multiplier * maxBackoff); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java new file mode 100644 index 0000000..42dba82 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java @@ -0,0 +1,48 @@ +package org.apache.hadoop.hbase.client.backoff; + +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.Map; +import java.util.TreeMap; + +/** + * Track the statistics for a single region server + */ +public class ServerStatistics { + + Map + stats = new TreeMap(Bytes.BYTES_COMPARATOR); + + /** + * Good enough attempt. Last writer wins. It doesn't really matter which one gets to update,
+ * as something gets set. + * @param region + * @param currentStats + */ + public void update(byte[] region, ClientProtos.RegionLoadStats currentStats) { + RegionStatistics regionStat = this.stats.get(region); + if (regionStat == null) { + regionStat = new RegionStatistics(); + this.stats.put(region, regionStat); + } + + regionStat.update(currentStats); + } + + public RegionStatistics getStatsForRegion(byte[] regionName) { + return stats.get(regionName); + } + + public static class RegionStatistics { + private int memstoreLoad = 0; + + public void update(ClientProtos.RegionLoadStats currentStats) { + this.memstoreLoad = currentStats.getMemstoreLoad(); + } + + public int getMemstoreLoadPercent() { + return this.memstoreLoad; + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java index e627662..071e9c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java @@ -61,7 +61,7 @@ public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{ this.connection = conn; this.table = table; this.row = row; - this.rpcFactory = RpcRetryingCallerFactory.instantiate(conn.getConfiguration()); + this.rpcFactory = RpcRetryingCallerFactory.instantiate(conn.getConfiguration(), null); this.operationTimeout = conn.getConfiguration().getInt( HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index 3efa4a8..bf381e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultWithStats; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; @@ -116,17 +117,25 @@ public final class ResponseConverter { } for (ResultOrException roe : actionResult.getResultOrExceptionList()) { + Object responseValue; if (roe.hasException()) { - results.add(regionName, roe.getIndex(), ProtobufUtil.toException(roe.getException())); + responseValue = ProtobufUtil.toException(roe.getException()); } else if (roe.hasResult()) { - results.add(regionName, roe.getIndex(), ProtobufUtil.toResult(roe.getResult(), cells)); + Result r = ProtobufUtil.toResult(roe.getResult(), cells); + // add the load stats, if we got any + if (roe.hasLoadStats()) { + responseValue = new ResultWithStats(r, roe.getLoadStats()); + } else { + responseValue = r; + } } else if (roe.hasServiceResult()) { - results.add(regionName, roe.getIndex(), roe.getServiceResult()); + responseValue = roe.getServiceResult(); } else { // no result & no exception. Unexpected.
throw new IllegalStateException("No result & no exception roe=" + roe + " for region " + actions.getRegion()); } + results.add(regionName, roe.getIndex(), responseValue); } } @@ -151,9 +160,11 @@ * @param r * @return an action result builder */ - public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) { + public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r, + ClientProtos.RegionLoadStats stats) { ResultOrException.Builder builder = ResultOrException.newBuilder(); if (r != null) builder.setResult(r); + if (stats != null) builder.setLoadStats(stats); return builder; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java new file mode 100644 index 0000000..3f28360 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java @@ -0,0 +1,93 @@ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy; +import org.apache.hadoop.hbase.client.backoff.ServerStatistics; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestClientExponentialBackoff { + + ServerName server = Mockito.mock(ServerName.class); + byte[] regionname = Bytes.toBytes("region"); + + @Test + public void testNulls() { + Configuration conf = new Configuration(false); + ExponentialClientBackoffPolicy backoff = new ExponentialClientBackoffPolicy(conf); + assertEquals(0, backoff.getBackoffTime(null, null, null)); + + // server name doesn't matter to calculation, but check it now anyway + assertEquals(0, backoff.getBackoffTime(server, null, null)); + assertEquals(0, backoff.getBackoffTime(server, regionname, null)); + + // check when no stats for the region yet + ServerStatistics stats = new ServerStatistics(); + assertEquals(0, backoff.getBackoffTime(server, regionname, stats)); + } + + @Test + public void testMaxLoad() { + Configuration conf = new Configuration(false); + ExponentialClientBackoffPolicy backoff = new ExponentialClientBackoffPolicy(conf); + + ServerStatistics stats = new ServerStatistics(); + update(stats, 100); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, + regionname, stats)); + + // another policy with a different max timeout + long max = 100; + conf.setLong(ExponentialClientBackoffPolicy.MAX_BACKOFF_KEY, max); + ExponentialClientBackoffPolicy backoffShortTimeout = new ExponentialClientBackoffPolicy(conf); + assertEquals(max, backoffShortTimeout.getBackoffTime(server, regionname, stats)); + + // test beyond 100 still doesn't exceed the max + update(stats, 101); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, + regionname, stats)); + assertEquals(max, backoffShortTimeout.getBackoffTime(server, regionname, stats)); + + // and that when we are below 100, it's less than the max timeout + update(stats, 99); + assertTrue(backoff.getBackoffTime(server, + regionname, stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); +
assertTrue(backoffShortTimeout.getBackoffTime(server, regionname, stats) < max); + } + + /** + * Make sure that we get results in the order that we expect - backoff for a load of 1 should + * be less than backoff for 10, which should be less than that for 50. + */ + @Test + public void testResultOrdering() { + Configuration conf = new Configuration(false); + // make the max timeout really high so we get differentiation between load factors + conf.setLong(ExponentialClientBackoffPolicy.MAX_BACKOFF_KEY, Integer.MAX_VALUE); + ExponentialClientBackoffPolicy backoff = new ExponentialClientBackoffPolicy(conf); + + ServerStatistics stats = new ServerStatistics(); + long previous = backoff.getBackoffTime(server, regionname, stats); + for (int i = 1; i <= 100; i++) { + update(stats, i); + long next = backoff.getBackoffTime(server, regionname, stats); + assertTrue( + "Previous backoff time " + previous + " >= " + next + ", the next backoff time for " + + "load " + i, previous < next); + previous = next; + } + } + + private void update(ServerStatistics stats, int load) { + ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() + .setMemstoreLoad + (load).build(); + stats.update(regionname, stat); + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 31aebcb..00e37ae 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -991,6 +991,13 @@ public final class HConstants { /** Config for pluggable consensus provider */ public static final String HBASE_CONSENSUS_PROVIDER_CLASS = "hbase.consensus.provider.class"; + /** Config key for whether the server should send backpressure information and whether the + * client should listen to that backpressure from the server */ + public static final String ENABLE_CLIENT_BACKPRESSURE = "org.apache.hadoop.hbase.client" + + ".backpressure.enabled"; + public static final boolean DEFAULT_ENABLE_CLIENT_BACKPRESSURE = false; + + private HConstants() { // Can't be instantiated with this ctor. } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index fd81fe3..7c021df 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -25623,6 +25623,482 @@ public final class ClientProtos { // @@protoc_insertion_point(class_scope:RegionAction) } + public interface RegionLoadStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int32 memstoreLoad = 2 [default = 0]; + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + * <pre>
+     * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+     * </pre>
+ */ + boolean hasMemstoreLoad(); + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + * <pre>
+     * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+     * </pre>
+ */ + int getMemstoreLoad(); + } + /** + * Protobuf type {@code RegionLoadStats} + * + * <pre>
+   *
+   * Statistics about the current load on the region
+   * </pre>
+ */ + public static final class RegionLoadStats extends + com.google.protobuf.GeneratedMessage + implements RegionLoadStatsOrBuilder { + // Use RegionLoadStats.newBuilder() to construct. + private RegionLoadStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RegionLoadStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegionLoadStats defaultInstance; + public static RegionLoadStats getDefaultInstance() { + return defaultInstance; + } + + public RegionLoadStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionLoadStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 16: { + bitField0_ |= 0x00000001; + memstoreLoad_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_RegionLoadStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_RegionLoadStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionLoadStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionLoadStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int32 memstoreLoad = 2 [default = 0]; + public static final int MEMSTORELOAD_FIELD_NUMBER = 2; + private int memstoreLoad_; + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + *
+     * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+     * </pre>
+ */ + public boolean hasMemstoreLoad() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + * <pre>
+     * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+     * </pre>
+ */ + public int getMemstoreLoad() { + return memstoreLoad_; + } + + private void initFields() { + memstoreLoad_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(2, memstoreLoad_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, memstoreLoad_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats) obj; + + boolean result = true; + result = result && (hasMemstoreLoad() == other.hasMemstoreLoad()); + if (hasMemstoreLoad()) { + result = result && (getMemstoreLoad() + == other.getMemstoreLoad()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMemstoreLoad()) { + hash = (37 * hash) + MEMSTORELOAD_FIELD_NUMBER; + hash = (53 * hash) + getMemstoreLoad(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom(java.io.InputStream 
input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RegionLoadStats} + * + *
+     *
+     * Statistics about the current load on the region
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_RegionLoadStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_RegionLoadStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + memstoreLoad_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_RegionLoadStats_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats build() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.memstoreLoad_ = memstoreLoad_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance()) return this; + if (other.hasMemstoreLoad()) { + setMemstoreLoad(other.getMemstoreLoad()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + 
public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int32 memstoreLoad = 2 [default = 0]; + private int memstoreLoad_ ; + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + * <pre>
+       * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+       * 
+ */ + public boolean hasMemstoreLoad() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + *
+       * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+       * 
+ */ + public int getMemstoreLoad() { + return memstoreLoad_; + } + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + *
+       * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+       * 
+ */ + public Builder setMemstoreLoad(int value) { + bitField0_ |= 0x00000001; + memstoreLoad_ = value; + onChanged(); + return this; + } + /** + * optional int32 memstoreLoad = 2 [default = 0]; + * + *
+       * percent load on the memstore. Guaranteed to be positive, between 0 and 100
+       * 
+ */ + public Builder clearMemstoreLoad() { + bitField0_ = (bitField0_ & ~0x00000001); + memstoreLoad_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RegionLoadStats) + } + + static { + defaultInstance = new RegionLoadStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionLoadStats) + } + public interface ResultOrExceptionOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -25699,6 +26175,32 @@ public final class ClientProtos { * */ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResultOrBuilder getServiceResultOrBuilder(); + + // optional .RegionLoadStats loadStats = 5; + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+     * current load on the region
+     * 
+ */ + boolean hasLoadStats(); + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+     * current load on the region
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats(); + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+     * current load on the region
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder(); } /** * Protobuf type {@code ResultOrException} @@ -25802,6 +26304,19 @@ public final class ClientProtos { bitField0_ |= 0x00000008; break; } + case 42: { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = loadStats_.toBuilder(); + } + loadStats_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loadStats_); + loadStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -25946,11 +26461,46 @@ public final class ClientProtos { return serviceResult_; } + // optional .RegionLoadStats loadStats = 5; + public static final int LOADSTATS_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats loadStats_; + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+     * current load on the region
+     * 
+ */ + public boolean hasLoadStats() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+     * current load on the region
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats() { + return loadStats_; + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+     * current load on the region
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder() { + return loadStats_; + } + private void initFields() { index_ = 0; result_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance(); exception_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); serviceResult_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResult.getDefaultInstance(); + loadStats_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -25988,6 +26538,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, serviceResult_); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, loadStats_); + } getUnknownFields().writeTo(output); } @@ -26013,6 +26566,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, serviceResult_); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, loadStats_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -26056,6 +26613,11 @@ public final class ClientProtos { result = result && getServiceResult() .equals(other.getServiceResult()); } + result = result && (hasLoadStats() == other.hasLoadStats()); + if (hasLoadStats()) { + result = result && getLoadStats() + .equals(other.getLoadStats()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -26085,6 +26647,10 @@ public final class ClientProtos { hash = (37 * hash) + SERVICE_RESULT_FIELD_NUMBER; hash = (53 * hash) + getServiceResult().hashCode(); } + if (hasLoadStats()) { + hash = (37 * hash) + LOADSTATS_FIELD_NUMBER; + hash = (53 * hash) + getLoadStats().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -26196,6 +26762,7 @@ public final class ClientProtos { getResultFieldBuilder(); getExceptionFieldBuilder(); getServiceResultFieldBuilder(); + getLoadStatsFieldBuilder(); } } private static Builder create() { @@ -26224,6 +26791,12 @@ public final class ClientProtos { serviceResultBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); + if (loadStatsBuilder_ == null) { + loadStats_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance(); + } else { + loadStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -26280,6 +26853,14 @@ public final class ClientProtos { } else { result.serviceResult_ = serviceResultBuilder_.build(); } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (loadStatsBuilder_ == null) { + result.loadStats_ = loadStats_; + } else { + result.loadStats_ = loadStatsBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -26308,6 +26889,9 @@ public final class ClientProtos { if (other.hasServiceResult()) { mergeServiceResult(other.getServiceResult()); } + if (other.hasLoadStats()) { + mergeLoadStats(other.getLoadStats()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -26787,6 +27371,159 @@ public final class ClientProtos { return serviceResultBuilder_; } + // optional .RegionLoadStats loadStats = 5; + private 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats loadStats_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder> loadStatsBuilder_; + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public boolean hasLoadStats() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats getLoadStats() { + if (loadStatsBuilder_ == null) { + return loadStats_; + } else { + return loadStatsBuilder_.getMessage(); + } + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public Builder setLoadStats(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { + if (loadStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + loadStats_ = value; + onChanged(); + } else { + loadStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public Builder setLoadStats( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder builderForValue) { + if (loadStatsBuilder_ == null) { + loadStats_ = builderForValue.build(); + onChanged(); + } else { + loadStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public Builder mergeLoadStats(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats value) { + if (loadStatsBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + loadStats_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance()) { + loadStats_ = + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.newBuilder(loadStats_).mergeFrom(value).buildPartial(); + } else { + loadStats_ = value; + } + onChanged(); + } else { + loadStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public Builder clearLoadStats() { + if (loadStatsBuilder_ == null) { + loadStats_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.getDefaultInstance(); + onChanged(); + } else { + loadStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder getLoadStatsBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getLoadStatsFieldBuilder().getBuilder(); + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder getLoadStatsOrBuilder() { + if (loadStatsBuilder_ != null) { + return loadStatsBuilder_.getMessageOrBuilder(); + } else { + return loadStats_; + } + } + /** + * optional .RegionLoadStats loadStats = 5; + * + *
+       * current load on the region
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder> + getLoadStatsFieldBuilder() { + if (loadStatsBuilder_ == null) { + loadStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder>( + loadStats_, + getParentForChildren(), + isClean()); + loadStats_ = null; + } + return loadStatsBuilder_; + } + // @@protoc_insertion_point(builder_scope:ResultOrException) } @@ -30071,6 +30808,11 @@ public final class ClientProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionAction_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionLoadStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionLoadStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_ResultOrException_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -30181,26 +30923,28 @@ public final class ClientProtos { "ce_call\030\004 \001(\0132\027.CoprocessorServiceCall\"Y", "\n\014RegionAction\022 \n\006region\030\001 \002(\0132\020.RegionS" + "pecifier\022\016\n\006atomic\030\002 \001(\010\022\027\n\006action\030\003 \003(\013" + - "2\007.Action\"\221\001\n\021ResultOrException\022\r\n\005index" + - "\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Result\022!\n\texcep" + - "tion\030\003 \001(\0132\016.NameBytesPair\0221\n\016service_re" + - "sult\030\004 \001(\0132\031.CoprocessorServiceResult\"f\n" + - "\022RegionActionResult\022-\n\021resultOrException" + - "\030\001 \003(\0132\022.ResultOrException\022!\n\texception\030" + - "\002 \001(\0132\016.NameBytesPair\"G\n\014MultiRequest\022#\n" + - "\014regionAction\030\001 \003(\0132\r.RegionAction\022\022\n\nno", - "nceGroup\030\002 \001(\004\"@\n\rMultiResponse\022/\n\022regio" + - "nActionResult\030\001 \003(\0132\023.RegionActionResult" + - "2\261\002\n\rClientService\022 \n\003Get\022\013.GetRequest\032\014" + - ".GetResponse\022)\n\006Mutate\022\016.MutateRequest\032\017" + - ".MutateResponse\022#\n\004Scan\022\014.ScanRequest\032\r." + - "ScanResponse\022>\n\rBulkLoadHFile\022\025.BulkLoad" + - "HFileRequest\032\026.BulkLoadHFileResponse\022F\n\013" + - "ExecService\022\032.CoprocessorServiceRequest\032" + - "\033.CoprocessorServiceResponse\022&\n\005Multi\022\r." 
+ - "MultiRequest\032\016.MultiResponseBB\n*org.apac", - "he.hadoop.hbase.protobuf.generatedB\014Clie" + - "ntProtosH\001\210\001\001\240\001\001" + "2\007.Action\"*\n\017RegionLoadStats\022\027\n\014memstore" + + "Load\030\002 \001(\005:\0010\"\266\001\n\021ResultOrException\022\r\n\005i" + + "ndex\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Result\022!\n\te" + + "xception\030\003 \001(\0132\016.NameBytesPair\0221\n\016servic" + + "e_result\030\004 \001(\0132\031.CoprocessorServiceResul" + + "t\022#\n\tloadStats\030\005 \001(\0132\020.RegionLoadStats\"f" + + "\n\022RegionActionResult\022-\n\021resultOrExceptio" + + "n\030\001 \003(\0132\022.ResultOrException\022!\n\texception", + "\030\002 \001(\0132\016.NameBytesPair\"G\n\014MultiRequest\022#" + + "\n\014regionAction\030\001 \003(\0132\r.RegionAction\022\022\n\nn" + + "onceGroup\030\002 \001(\004\"@\n\rMultiResponse\022/\n\022regi" + + "onActionResult\030\001 \003(\0132\023.RegionActionResul" + + "t2\261\002\n\rClientService\022 \n\003Get\022\013.GetRequest\032" + + "\014.GetResponse\022)\n\006Mutate\022\016.MutateRequest\032" + + "\017.MutateResponse\022#\n\004Scan\022\014.ScanRequest\032\r" + + ".ScanResponse\022>\n\rBulkLoadHFile\022\025.BulkLoa" + + "dHFileRequest\032\026.BulkLoadHFileResponse\022F\n" + + "\013ExecService\022\032.CoprocessorServiceRequest", + "\032\033.CoprocessorServiceResponse\022&\n\005Multi\022\r" + + ".MultiRequest\032\016.MultiResponseBB\n*org.apa" + + "che.hadoop.hbase.protobuf.generatedB\014Cli" + + "entProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -30357,26 +31101,32 @@ public final class ClientProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionAction_descriptor, new java.lang.String[] { "Region", "Atomic", "Action", }); - internal_static_ResultOrException_descriptor = + internal_static_RegionLoadStats_descriptor = getDescriptor().getMessageTypes().get(22); + internal_static_RegionLoadStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionLoadStats_descriptor, + new java.lang.String[] { "MemstoreLoad", }); + internal_static_ResultOrException_descriptor = + getDescriptor().getMessageTypes().get(23); internal_static_ResultOrException_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ResultOrException_descriptor, - new java.lang.String[] { "Index", "Result", "Exception", "ServiceResult", }); + new java.lang.String[] { "Index", "Result", "Exception", "ServiceResult", "LoadStats", }); internal_static_RegionActionResult_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(24); internal_static_RegionActionResult_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionActionResult_descriptor, new java.lang.String[] { "ResultOrException", "Exception", }); internal_static_MultiRequest_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(25); internal_static_MultiRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiRequest_descriptor, new java.lang.String[] { "RegionAction", "NonceGroup", }); internal_static_MultiResponse_descriptor = - getDescriptor().getMessageTypes().get(25); + 
getDescriptor().getMessageTypes().get(26); internal_static_MultiResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java index 85e4816..eeab45c 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java @@ -720,53 +720,53 @@ public final class MapReduceProtos { public interface TableSnapshotRegionSplitOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional .TableSchema table = 1; + // repeated string locations = 2; /** - * optional .TableSchema table = 1; + * repeated string locations = 2; */ - boolean hasTable(); + java.util.List + getLocationsList(); /** - * optional .TableSchema table = 1; + * repeated string locations = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable(); + int getLocationsCount(); /** - * optional .TableSchema table = 1; + * repeated string locations = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder(); - - // optional .RegionInfo region = 2; + java.lang.String getLocations(int index); /** - * optional .RegionInfo region = 2; + * repeated string locations = 2; */ - boolean hasRegion(); + com.google.protobuf.ByteString + getLocationsBytes(int index); + + // optional .TableSchema table = 3; /** - * optional .RegionInfo region = 2; + * optional .TableSchema table = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(); + boolean hasTable(); /** - * optional .RegionInfo region = 2; + * optional .TableSchema table = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); - - // repeated string locations = 3; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable(); /** - * repeated string locations = 3; + * optional .TableSchema table = 3; */ - java.util.List - getLocationsList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder(); + + // optional .RegionInfo region = 4; /** - * repeated string locations = 3; + * optional .RegionInfo region = 4; */ - int getLocationsCount(); + boolean hasRegion(); /** - * repeated string locations = 3; + * optional .RegionInfo region = 4; */ - java.lang.String getLocations(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(); /** - * repeated string locations = 3; + * optional .RegionInfo region = 4; */ - com.google.protobuf.ByteString - getLocationsBytes(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); } /** * Protobuf type {@code TableSnapshotRegionSplit} @@ -819,7 +819,15 @@ public final class MapReduceProtos { } break; } - case 10: { + case 18: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + locations_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + locations_.add(input.readBytes()); + break; + } + case 26: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = table_.toBuilder(); @@ -832,7 +840,7 @@ public final 
class MapReduceProtos { bitField0_ |= 0x00000001; break; } - case 18: { + case 34: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = region_.toBuilder(); @@ -845,14 +853,6 @@ public final class MapReduceProtos { bitField0_ |= 0x00000002; break; } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - locations_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000004; - } - locations_.add(input.readBytes()); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -861,7 +861,7 @@ public final class MapReduceProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { locations_ = new com.google.protobuf.UnmodifiableLazyStringList(locations_); } this.unknownFields = unknownFields.build(); @@ -896,84 +896,84 @@ public final class MapReduceProtos { } private int bitField0_; - // optional .TableSchema table = 1; - public static final int TABLE_FIELD_NUMBER = 1; + // repeated string locations = 2; + public static final int LOCATIONS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList locations_; + /** + * repeated string locations = 2; + */ + public java.util.List + getLocationsList() { + return locations_; + } + /** + * repeated string locations = 2; + */ + public int getLocationsCount() { + return locations_.size(); + } + /** + * repeated string locations = 2; + */ + public java.lang.String getLocations(int index) { + return locations_.get(index); + } + /** + * repeated string locations = 2; + */ + public com.google.protobuf.ByteString + getLocationsBytes(int index) { + return locations_.getByteString(index); + } + + // optional .TableSchema table = 3; + public static final int TABLE_FIELD_NUMBER = 3; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_; /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public boolean hasTable() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() { return table_; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() { return table_; } - // optional .RegionInfo region = 2; - public static final int REGION_FIELD_NUMBER = 2; + // optional .RegionInfo region = 4; + public static final int REGION_FIELD_NUMBER = 4; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_; /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public boolean hasRegion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { return region_; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { return region_; } - // repeated string locations = 3; - public static final int LOCATIONS_FIELD_NUMBER = 3; - private 
com.google.protobuf.LazyStringList locations_; - /** - * repeated string locations = 3; - */ - public java.util.List - getLocationsList() { - return locations_; - } - /** - * repeated string locations = 3; - */ - public int getLocationsCount() { - return locations_.size(); - } - /** - * repeated string locations = 3; - */ - public java.lang.String getLocations(int index) { - return locations_.get(index); - } - /** - * repeated string locations = 3; - */ - public com.google.protobuf.ByteString - getLocationsBytes(int index) { - return locations_.getByteString(index); - } - private void initFields() { + locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); - locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -999,14 +999,14 @@ public final class MapReduceProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + for (int i = 0; i < locations_.size(); i++) { + output.writeBytes(2, locations_.getByteString(i)); + } if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, table_); + output.writeMessage(3, table_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, region_); - } - for (int i = 0; i < locations_.size(); i++) { - output.writeBytes(3, locations_.getByteString(i)); + output.writeMessage(4, region_); } getUnknownFields().writeTo(output); } @@ -1017,14 +1017,6 @@ public final class MapReduceProtos { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, table_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, region_); - } { int dataSize = 0; for (int i = 0; i < locations_.size(); i++) { @@ -1034,6 +1026,14 @@ public final class MapReduceProtos { size += dataSize; size += 1 * getLocationsList().size(); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, region_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -1057,6 +1057,8 @@ public final class MapReduceProtos { org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other = (org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit) obj; boolean result = true; + result = result && getLocationsList() + .equals(other.getLocationsList()); result = result && (hasTable() == other.hasTable()); if (hasTable()) { result = result && getTable() @@ -1067,8 +1069,6 @@ public final class MapReduceProtos { result = result && getRegion() .equals(other.getRegion()); } - result = result && getLocationsList() - .equals(other.getLocationsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -1082,6 +1082,10 @@ public final class MapReduceProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getLocationsCount() > 0) { + hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; + hash = (53 * 
hash) + getLocationsList().hashCode(); + } if (hasTable()) { hash = (37 * hash) + TABLE_FIELD_NUMBER; hash = (53 * hash) + getTable().hashCode(); @@ -1090,10 +1094,6 @@ public final class MapReduceProtos { hash = (37 * hash) + REGION_FIELD_NUMBER; hash = (53 * hash) + getRegion().hashCode(); } - if (getLocationsCount() > 0) { - hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; - hash = (53 * hash) + getLocationsList().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -1205,19 +1205,19 @@ public final class MapReduceProtos { public Builder clear() { super.clear(); + locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); if (tableBuilder_ == null) { table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); } else { tableBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); if (regionBuilder_ == null) { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); - locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -1247,7 +1247,13 @@ public final class MapReduceProtos { org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit result = new org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + locations_ = new com.google.protobuf.UnmodifiableLazyStringList( + locations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.locations_ = locations_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } if (tableBuilder_ == null) { @@ -1255,7 +1261,7 @@ public final class MapReduceProtos { } else { result.table_ = tableBuilder_.build(); } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } if (regionBuilder_ == null) { @@ -1263,12 +1269,6 @@ public final class MapReduceProtos { } else { result.region_ = regionBuilder_.build(); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - locations_ = new com.google.protobuf.UnmodifiableLazyStringList( - locations_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.locations_ = locations_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1285,22 +1285,22 @@ public final class MapReduceProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.getDefaultInstance()) return this; - if (other.hasTable()) { - mergeTable(other.getTable()); - } - if (other.hasRegion()) { - mergeRegion(other.getRegion()); - } if (!other.locations_.isEmpty()) { if (locations_.isEmpty()) { locations_ = other.locations_; - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000001); } else { ensureLocationsIsMutable(); locations_.addAll(other.locations_); } onChanged(); } + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } this.mergeUnknownFields(other.getUnknownFields()); return 
this; } @@ -1340,18 +1340,111 @@ public final class MapReduceProtos { } private int bitField0_; - // optional .TableSchema table = 1; + // repeated string locations = 2; + private com.google.protobuf.LazyStringList locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureLocationsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + locations_ = new com.google.protobuf.LazyStringArrayList(locations_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string locations = 2; + */ + public java.util.List + getLocationsList() { + return java.util.Collections.unmodifiableList(locations_); + } + /** + * repeated string locations = 2; + */ + public int getLocationsCount() { + return locations_.size(); + } + /** + * repeated string locations = 2; + */ + public java.lang.String getLocations(int index) { + return locations_.get(index); + } + /** + * repeated string locations = 2; + */ + public com.google.protobuf.ByteString + getLocationsBytes(int index) { + return locations_.getByteString(index); + } + /** + * repeated string locations = 2; + */ + public Builder setLocations( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder addLocations( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.add(value); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder addAllLocations( + java.lang.Iterable values) { + ensureLocationsIsMutable(); + super.addAll(values, locations_); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder clearLocations() { + locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder addLocationsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.add(value); + onChanged(); + return this; + } + + // optional .TableSchema table = 3; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableBuilder_; /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public boolean hasTable() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() { if (tableBuilder_ == null) { @@ -1361,7 +1454,7 @@ public final class MapReduceProtos { } } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { if (tableBuilder_ == null) { @@ -1373,11 +1466,11 @@ public final class MapReduceProtos { } else { 
tableBuilder_.setMessage(value); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder setTable( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { @@ -1387,15 +1480,15 @@ public final class MapReduceProtos { } else { tableBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { if (tableBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && + if (((bitField0_ & 0x00000002) == 0x00000002) && table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(table_).mergeFrom(value).buildPartial(); @@ -1406,11 +1499,11 @@ public final class MapReduceProtos { } else { tableBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder clearTable() { if (tableBuilder_ == null) { @@ -1419,19 +1512,19 @@ public final class MapReduceProtos { } else { tableBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableBuilder() { - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; onChanged(); return getTableFieldBuilder().getBuilder(); } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() { if (tableBuilder_ != null) { @@ -1441,7 +1534,7 @@ public final class MapReduceProtos { } } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> @@ -1457,18 +1550,18 @@ public final class MapReduceProtos { return tableBuilder_; } - // optional .RegionInfo region = 2; + // optional .RegionInfo region = 4; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public boolean hasRegion() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { if (regionBuilder_ == null) { @@ -1478,7 +1571,7 @@ public final class 
MapReduceProtos { } } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { @@ -1490,11 +1583,11 @@ public final class MapReduceProtos { } else { regionBuilder_.setMessage(value); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder setRegion( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { @@ -1504,15 +1597,15 @@ public final class MapReduceProtos { } else { regionBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && + if (((bitField0_ & 0x00000004) == 0x00000004) && region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial(); @@ -1523,11 +1616,11 @@ public final class MapReduceProtos { } else { regionBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder clearRegion() { if (regionBuilder_ == null) { @@ -1536,19 +1629,19 @@ public final class MapReduceProtos { } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; onChanged(); return getRegionFieldBuilder().getBuilder(); } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { if (regionBuilder_ != null) { @@ -1558,7 +1651,7 @@ public final class MapReduceProtos { } } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> @@ -1574,99 +1667,6 @@ public final class MapReduceProtos { return regionBuilder_; } - // repeated string locations = 3; - private com.google.protobuf.LazyStringList locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureLocationsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - locations_ = new com.google.protobuf.LazyStringArrayList(locations_); - bitField0_ |= 0x00000004; - } - } - /** - * repeated string locations = 3; - */ - public java.util.List - getLocationsList() { - return java.util.Collections.unmodifiableList(locations_); - } - /** - * repeated string locations = 3; - */ - public int getLocationsCount() { - return locations_.size(); - } - /** - * repeated string locations = 3; - */ - public 
java.lang.String getLocations(int index) { - return locations_.get(index); - } - /** - * repeated string locations = 3; - */ - public com.google.protobuf.ByteString - getLocationsBytes(int index) { - return locations_.getByteString(index); - } - /** - * repeated string locations = 3; - */ - public Builder setLocations( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocationsIsMutable(); - locations_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder addLocations( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocationsIsMutable(); - locations_.add(value); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder addAllLocations( - java.lang.Iterable values) { - ensureLocationsIsMutable(); - super.addAll(values, locations_); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder clearLocations() { - locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder addLocationsBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocationsIsMutable(); - locations_.add(value); - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:TableSnapshotRegionSplit) } @@ -1699,9 +1699,9 @@ public final class MapReduceProtos { java.lang.String[] descriptorData = { "\n\017MapReduce.proto\032\013HBase.proto\".\n\013ScanMe" + "trics\022\037\n\007metrics\030\001 \003(\0132\016.NameInt64Pair\"g" + - "\n\030TableSnapshotRegionSplit\022\033\n\005table\030\001 \001(" + - "\0132\014.TableSchema\022\033\n\006region\030\002 \001(\0132\013.Region" + - "Info\022\021\n\tlocations\030\003 \003(\tBB\n*org.apache.ha" + + "\n\030TableSnapshotRegionSplit\022\021\n\tlocations\030" + + "\002 \003(\t\022\033\n\005table\030\003 \001(\0132\014.TableSchema\022\033\n\006re" + + "gion\030\004 \001(\0132\013.RegionInfoBB\n*org.apache.ha" + "doop.hbase.protobuf.generatedB\017MapReduce" + "ProtosH\001\240\001\001" }; @@ -1721,7 +1721,7 @@ public final class MapReduceProtos { internal_static_TableSnapshotRegionSplit_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableSnapshotRegionSplit_descriptor, - new java.lang.String[] { "Table", "Region", "Locations", }); + new java.lang.String[] { "Locations", "Table", "Region", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 631d0cd..0602dff 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -337,6 +337,14 @@ message RegionAction { repeated Action action = 3; } +/* +* Statistics about the current load on the region +*/ +message RegionLoadStats{ + // percent load on the memstore. 
Guaranteed to be positive, between 0 and 100 + optional int32 memstoreLoad = 2 [default = 0]; +} + /** * Either a Result or an Exception NameBytesPair (keyed by * exception name whose value is the exception stringified) @@ -350,6 +358,8 @@ message ResultOrException { optional NameBytesPair exception = 3; // result if this was a coprocessor service call optional CoprocessorServiceResult service_result = 4; + // current load on the region + optional RegionLoadStats loadStats = 5; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java index 85c86f9..380f29e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class CoprocessorHConnection implements ClusterConnection { +public class CoprocessorHConnection extends ConnectionAdapter{ private static final NonceGenerator ng = new ConnectionManager.NoNonceGenerator(); /** @@ -75,294 +75,29 @@ public class CoprocessorHConnection implements ClusterConnection { return connection; } - private ClusterConnection delegate; private ServerName serverName; private HRegionServer server; public CoprocessorHConnection(ClusterConnection delegate, HRegionServer server) { + super(delegate); this.server = server; this.serverName = server.getServerName(); - this.delegate = delegate; } + @Override public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface getClient(ServerName serverName) throws IOException { // client is trying to reach off-server, so we can't do anything special if (!this.serverName.equals(serverName)) { - return delegate.getClient(serverName); + return wrappedConnection.getClient(serverName); } // the client is attempting to write to the same regionserver, we can short-circuit to our // local regionserver return server.getRSRpcServices(); } - public void abort(String why, Throwable e) { - delegate.abort(why, e); - } - - public boolean isAborted() { - return delegate.isAborted(); - } - - public Configuration getConfiguration() { - return delegate.getConfiguration(); - } - - public HTableInterface getTable(String tableName) throws IOException { - return delegate.getTable(tableName); - } - - public HTableInterface getTable(byte[] tableName) throws IOException { - return delegate.getTable(tableName); - } - - public HTableInterface getTable(TableName tableName) throws IOException { - return delegate.getTable(tableName); - } - - public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException { - return delegate.getTable(tableName, pool); - } - - public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException { - return delegate.getTable(tableName, pool); - } - - public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException { - return delegate.getTable(tableName, pool); - } - - public Admin getAdmin() throws IOException { return delegate.getAdmin(); } - - public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException { - return delegate.isMasterRunning(); - } - - public boolean isTableEnabled(TableName tableName) throws IOException { - return delegate.isTableEnabled(tableName); - } - 
- public boolean isTableEnabled(byte[] tableName) throws IOException { - return delegate.isTableEnabled(tableName); - } - - public boolean isTableDisabled(TableName tableName) throws IOException { - return delegate.isTableDisabled(tableName); - } - - public boolean isTableDisabled(byte[] tableName) throws IOException { - return delegate.isTableDisabled(tableName); - } - - public boolean isTableAvailable(TableName tableName) throws IOException { - return delegate.isTableAvailable(tableName); - } - - public boolean isTableAvailable(byte[] tableName) throws IOException { - return delegate.isTableAvailable(tableName); - } - - public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException { - return delegate.isTableAvailable(tableName, splitKeys); - } - - public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException { - return delegate.isTableAvailable(tableName, splitKeys); - } - - public HTableDescriptor[] listTables() throws IOException { - return delegate.listTables(); - } - - public String[] getTableNames() throws IOException { - return delegate.getTableNames(); - } - - public TableName[] listTableNames() throws IOException { - return delegate.listTableNames(); - } - - public HTableDescriptor getHTableDescriptor(TableName tableName) throws IOException { - return delegate.getHTableDescriptor(tableName); - } - - public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException { - return delegate.getHTableDescriptor(tableName); - } - - public HRegionLocation locateRegion(TableName tableName, byte[] row) throws IOException { - return delegate.locateRegion(tableName, row); - } - - public HRegionLocation locateRegion(byte[] tableName, byte[] row) throws IOException { - return delegate.locateRegion(tableName, row); - } - - public void clearRegionCache() { - delegate.clearRegionCache(); - } - - public void clearRegionCache(TableName tableName) { - delegate.clearRegionCache(tableName); - } - - public void clearRegionCache(byte[] tableName) { - delegate.clearRegionCache(tableName); - } - - public HRegionLocation relocateRegion(TableName tableName, byte[] row) throws IOException { - return delegate.relocateRegion(tableName, row); - } - - public HRegionLocation relocateRegion(byte[] tableName, byte[] row) throws IOException { - return delegate.relocateRegion(tableName, row); - } - - public void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception, - ServerName source) { - delegate.updateCachedLocations(tableName, rowkey, exception, source); - } - - public void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception, - HRegionLocation source) { - delegate.updateCachedLocations(tableName, rowkey, exception, source); - } - - public void updateCachedLocations(byte[] tableName, byte[] rowkey, Object exception, - HRegionLocation source) { - delegate.updateCachedLocations(tableName, rowkey, exception, source); - } - - public HRegionLocation locateRegion(byte[] regionName) throws IOException { - return delegate.locateRegion(regionName); - } - - public List locateRegions(TableName tableName) throws IOException { - return delegate.locateRegions(tableName); - } - - public List locateRegions(byte[] tableName) throws IOException { - return delegate.locateRegions(tableName); - } - - public List - locateRegions(TableName tableName, boolean useCache, boolean offlined) throws IOException { - return delegate.locateRegions(tableName, useCache, offlined); - } - - public List locateRegions(byte[] tableName, 
boolean useCache, boolean offlined) - throws IOException { - return delegate.locateRegions(tableName, useCache, offlined); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface getMaster() - throws IOException { - return delegate.getMaster(); - } - - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface - getAdmin(ServerName serverName) throws IOException { - return delegate.getAdmin(serverName); - } - - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface - getAdmin(ServerName serverName, boolean getMaster) throws IOException { - return delegate.getAdmin(serverName, getMaster); - } - - public HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reload) - throws IOException { - return delegate.getRegionLocation(tableName, row, reload); - } - - public HRegionLocation getRegionLocation(byte[] tableName, byte[] row, boolean reload) - throws IOException { - return delegate.getRegionLocation(tableName, row, reload); - } - - public void processBatch(List actions, TableName tableName, ExecutorService pool, - Object[] results) throws IOException, InterruptedException { - delegate.processBatch(actions, tableName, pool, results); - } - - public void processBatch(List actions, byte[] tableName, ExecutorService pool, - Object[] results) throws IOException, InterruptedException { - delegate.processBatch(actions, tableName, pool, results); - } - - public void processBatchCallback(List list, TableName tableName, - ExecutorService pool, Object[] results, Callback callback) throws IOException, - InterruptedException { - delegate.processBatchCallback(list, tableName, pool, results, callback); - } - - public void processBatchCallback(List list, byte[] tableName, - ExecutorService pool, Object[] results, Callback callback) throws IOException, - InterruptedException { - delegate.processBatchCallback(list, tableName, pool, results, callback); - } - - public void setRegionCachePrefetch(TableName tableName, boolean enable) { - delegate.setRegionCachePrefetch(tableName, enable); - } - - public void setRegionCachePrefetch(byte[] tableName, boolean enable) { - delegate.setRegionCachePrefetch(tableName, enable); - } - - public boolean getRegionCachePrefetch(TableName tableName) { - return delegate.getRegionCachePrefetch(tableName); - } - - public boolean getRegionCachePrefetch(byte[] tableName) { - return delegate.getRegionCachePrefetch(tableName); - } - - public int getCurrentNrHRS() throws IOException { - return delegate.getCurrentNrHRS(); - } - - public HTableDescriptor[] getHTableDescriptorsByTableName(List tableNames) - throws IOException { - return delegate.getHTableDescriptorsByTableName(tableNames); - } - - public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException { - return delegate.getHTableDescriptors(tableNames); - } - - public boolean isClosed() { - return delegate.isClosed(); - } - - public void clearCaches(ServerName sn) { - delegate.clearCaches(sn); - } - - public void close() throws IOException { - delegate.close(); - } - - public void deleteCachedRegionLocation(HRegionLocation location) { - delegate.deleteCachedRegionLocation(location); - } - - public MasterKeepAliveConnection getKeepAliveMasterService() - throws MasterNotRunningException { - return delegate.getKeepAliveMasterService(); - } - - public boolean isDeadServer(ServerName serverName) { - return delegate.isDeadServer(serverName); - } - @Override public NonceGenerator 
getNonceGenerator() {
    return ng; // don't use nonces for coprocessor connection
  }
-
-  @Override
-  public AsyncProcess getAsyncProcess() {
-    return delegate.getAsyncProcess();
-  }
 }
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index aabac3f..ef24627 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -675,7 +675,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     try {
       List<LoadQueueItem> toRetry = new ArrayList<LoadQueueItem>();
       Configuration conf = getConf();
-      boolean success = RpcRetryingCallerFactory.instantiate(conf).<Boolean> newCaller()
+      boolean success = RpcRetryingCallerFactory.instantiate(conf,
+          null).<Boolean> newCaller()
           .callWithRetries(svrCallable, Integer.MAX_VALUE);
       if (!success) {
         LOG.warn("Attempt to bulk load region containing "
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9b0c0e3..93c814a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -64,24 +64,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -113,6 +97,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
 import 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9b0c0e3..93c814a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -64,24 +64,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -113,6 +97,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -509,6 +494,7 @@ public class HRegion implements HeapSize { // , Writable{
   private final MetricsRegion metricsRegion;
   private final MetricsRegionWrapperImpl metricsRegionWrapper;
   private final Durability durability;
+  private final boolean regionStatsEnabled;
 
   /**
    * HRegion constructor. This constructor should only be used for testing and
@@ -651,6 +637,13 @@
 
     this.disallowWritesInRecovering = conf.getBoolean(HConstants.DISALLOW_WRITES_IN_RECOVERING,
       HConstants.DEFAULT_DISALLOW_WRITES_IN_RECOVERING_CONFIG);
+
+    // disable stats tracking for system tables, but check the config for everything else
+    this.regionStatsEnabled = htd.getTableName().getNamespaceAsString().equals(
+        NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) ?
+        false :
+        conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
+            HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
   }
 
   void setHTableSpecificConf() {
@@ -4795,18 +4788,18 @@
     return results;
   }
 
-  public void mutateRow(RowMutations rm) throws IOException {
+  public ClientProtos.RegionLoadStats mutateRow(RowMutations rm) throws IOException {
     // Don't need nonces here - RowMutations only supports puts and deletes
-    mutateRowsWithLocks(rm.getMutations(), Collections.singleton(rm.getRow()));
+    return mutateRowsWithLocks(rm.getMutations(), Collections.singleton(rm.getRow()));
   }
 
   /**
    * Perform atomic mutations within the region w/o nonces.
    * See {@link #mutateRowsWithLocks(Collection, Collection, long, long)}
    */
-  public void mutateRowsWithLocks(Collection mutations,
+  public ClientProtos.RegionLoadStats mutateRowsWithLocks(Collection mutations,
       Collection rowsToLock) throws IOException {
-    mutateRowsWithLocks(mutations, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE);
+    return mutateRowsWithLocks(mutations, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE);
   }
 
   /**
@@ -4821,10 +4814,25 @@
    * rowsToLock is sorted in order to avoid deadlocks.
    * @throws IOException
    */
-  public void mutateRowsWithLocks(Collection mutations,
+  public ClientProtos.RegionLoadStats mutateRowsWithLocks(Collection mutations,
       Collection rowsToLock, long nonceGroup, long nonce) throws IOException {
     MultiRowMutationProcessor proc = new MultiRowMutationProcessor(mutations, rowsToLock);
     processRowsWithLocks(proc, -1, nonceGroup, nonce);
+    return getRegionStats();
+  }
+
+  /**
+   * Get the current region load statistics to report back to the client.
+   * @return the current stats, or null if stats tracking is disabled for this region
+   */
+  public ClientProtos.RegionLoadStats getRegionStats() {
+    if (!regionStatsEnabled) {
+      return null;
+    }
+    ClientProtos.RegionLoadStats.Builder stats = ClientProtos.RegionLoadStats.newBuilder();
+    stats.setMemstoreLoad((int) (Math.min(100,
+        (this.memstoreSize.get() * 100) / this.memstoreFlushSize)));
+    return stats.build();
   }
 
   /**
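
// Worked example of the memstore load calculation in getRegionStats() above, using the
// numbers from TestClientPushback further down (flush size forced to 1024 bytes): if the
// single put leaves roughly 155-160 bytes in the memstore -- say 157 bytes, an assumed
// figure since the exact count depends on KeyValue overhead -- then
//   min(100, (157 * 100) / 1024) = min(100, 15) = 15
// so the region reports a memstore load of 15 percent, which is the value the test asserts.
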
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 2e7b022..81d76ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -248,8 +248,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   }
 
   private static ResultOrException getResultOrException(
-      final ClientProtos.Result r, final int index) {
-    return getResultOrException(ResponseConverter.buildActionResult(r), index);
+      final ClientProtos.Result r, final int index, final ClientProtos.RegionLoadStats stats) {
+    return getResultOrException(ResponseConverter.buildActionResult(r, stats), index);
   }
 
   private static ResultOrException getResultOrException(final Exception e, final int index) {
@@ -345,7 +345,8 @@
    * @param cellScanner if non-null, the mutation data -- the Cell content.
    * @throws IOException
    */
-  private void mutateRows(final HRegion region, final List actions,
+  private ClientProtos.RegionLoadStats mutateRows(final HRegion region,
+      final List actions,
       final CellScanner cellScanner) throws IOException {
     if (!region.getRegionInfo().isMetaTable()) {
       regionServer.cacheFlusher.reclaimMemStoreMemory();
@@ -371,7 +372,7 @@
         throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
       }
     }
-    region.mutateRow(rm);
+    return region.mutateRow(rm);
   }
 
   /**
@@ -605,7 +606,7 @@
       case SUCCESS:
         builder.addResultOrException(getResultOrException(
-          ClientProtos.Result.getDefaultInstance(), index));
+          ClientProtos.Result.getDefaultInstance(), index, region.getRegionStats()));
         break;
       }
     }
 
@@ -1660,7 +1661,13 @@
       // How does this call happen? It may need some work to play well w/ the surroundings.
       // Need to return an item per Action along w/ Action index. TODO.
       try {
-        mutateRows(region, regionAction.getActionList(), cellScanner);
+        ClientProtos.RegionLoadStats stats = mutateRows(region, regionAction.getActionList(),
+            cellScanner);
+        // add the stats to the response
+        if (stats != null) {
+          responseBuilder.addRegionActionResult(RegionActionResult.newBuilder()
+              .addResultOrException(ResultOrException.newBuilder().setLoadStats(stats)));
+        }
       } catch (IOException e) {
         // As it's atomic, we may expect it's a global failure.
         regionActionResultBuilder.setException(ResponseConverter.buildException(e));
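
// Sketch of the client-side consumption of these stats (hedged: updateRegionStats is a
// hypothetical name, since the patch only shows the tracker being read via getBackoff()
// and getServerStatsForTesting()). When a ResultOrException carries RegionLoadStats, the
// tracker would record them per server/region so later sends can compute a backoff:
if (resultOrException.hasLoadStats() && tracker != null) {
  tracker.updateRegionStats(server, regionName, resultOrException.getLoadStats());
}
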
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
index ffb79c1..0db6a45 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
@@ -159,7 +159,7 @@ public class WALEditsReplaySink {
   private void replayEdits(final HRegionLocation regionLoc, final HRegionInfo regionInfo,
       final List entries) throws IOException {
     try {
-      RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf);
+      RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf, null);
       ReplayServerCallable callable =
           new ReplayServerCallable(this.conn, this.tableName, regionLoc, regionInfo, entries);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 35471a9..62a18d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -122,7 +122,7 @@ public class HConnectionTestingUtility {
     NonceGenerator ng = Mockito.mock(NonceGenerator.class);
     Mockito.when(c.getNonceGenerator()).thenReturn(ng);
     Mockito.when(c.getAsyncProcess()).thenReturn(new AsyncProcess(
-        c, conf, null, RpcRetryingCallerFactory.instantiate(conf), false));
+        c, conf, null, RpcRetryingCallerFactory.instantiate(conf, null), false));
     Mockito.doNothing().when(c).incCount();
     Mockito.doNothing().when(c).decCount();
     return c;
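
// One plausible backoff policy over the tracked statistics (the linear ramp and the cap
// parameter are assumptions, not the patch's policy): scale the last reported memstore
// load into a sleep time, which is the kind of value ServerStatisticTracker.getBackoff()
// hands to the DelayingRunner in AsyncProcess.
long backoffMillis(ServerStatistics.RegionStatistics regionStats, long maxBackoffMillis) {
  int load = regionStats.getMemstoreLoadPercent(); // 0-100, as reported by the server
  return (load * maxBackoffMillis) / 100;          // linear ramp, illustrative only
}
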
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
new file mode 100644
index 0000000..d4a0d14
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
@@ -0,0 +1,76 @@
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Test that we can actually send and use region metrics to slow down client writes
+ */
+@Category(MediumTests.class)
+public class TestClientPushback {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static final byte[] tableName = Bytes.toBytes("client-pushback");
+  private static final byte[] family = Bytes.toBytes("f");
+  private static final byte[] qualifier = Bytes.toBytes("q");
+  private static final long flushSizeBytes = 1024;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    Configuration conf = UTIL.getConfiguration();
+    // enable backpressure
+    conf.setBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, true);
+    // turn the memstore size way down so we don't need to write a lot to see changes in memstore
+    // load
+    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
+        flushSizeBytes);
+    // ensure we block the flushes when we are double that flush size
+    conf.setLong("hbase.hregion.memstore.block.multiplier", 2);
+
+    UTIL.startMiniCluster();
+    UTIL.createTable(tableName, family);
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testClientTracksServerPushback() throws Exception {
+    Configuration conf = UTIL.getConfiguration();
+    TableName tablename = TableName.valueOf(tableName);
+    HTable table = new HTable(conf, tablename);
+    // make sure we flush after each put
+    table.setAutoFlushTo(true);
+
+    // write some data
+    Put p = new Put(Bytes.toBytes("row"));
+    p.add(family, qualifier, Bytes.toBytes("value1"));
+    table.put(p);
+
+    // get the stats for the region hosting our table
+    ClusterConnection conn = ConnectionManager.getConnectionInternal(conf);
+    ServerStatisticTracker stats = conn.getStatisticsTracker();
+    assertNotNull("No stats configured for the client!", stats);
+    // get the names so we can query the stats
+    ServerName server = UTIL.getHBaseCluster().getRegionServer(0).getServerName();
+    byte[] regionName = UTIL.getHBaseCluster().getRegionServer(0).getOnlineRegions(tablename)
+        .get(0).getRegionName();
+
+    // check that the load on the memstore matches the single put we wrote (~15% of flush size)
+    ServerStatistics serverStats = stats.getServerStatsForTesting(server);
+    ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
+    assertEquals(15, regionStats.getMemstoreLoadPercent());
+  }
+}
\ No newline at end of file
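
// To exercise the feature outside this test, the same knobs apply. The tiny flush size is
// test-only, chosen so a single put registers as visible memstore load; and since the test
// has to opt in explicitly, backpressure appears to be off by default:
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, true);
conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024); // test-only value
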