From 6d2183fd1c2eb0176688b701d527f96a6e600ee2 Mon Sep 17 00:00:00 2001 From: Ashish Singhi Date: Wed, 11 Mar 2015 19:54:26 +0530 Subject: [PATCH] HBASE-13205 [branch-1] Backport HBASE-11598 Add simple rpc throttling --- .../java/org/apache/hadoop/hbase/client/Admin.java | 18 + .../hadoop/hbase/client/ConnectionManager.java | 16 +- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 30 + .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 155 + .../quotas/InvalidQuotaSettingsException.java | 32 + .../hbase/quotas/QuotaExceededException.java | 34 + .../apache/hadoop/hbase/quotas/QuotaFilter.java | 110 + .../apache/hadoop/hbase/quotas/QuotaRetriever.java | 185 + .../org/apache/hadoop/hbase/quotas/QuotaScope.java | 43 + .../apache/hadoop/hbase/quotas/QuotaSettings.java | 124 + .../hadoop/hbase/quotas/QuotaSettingsFactory.java | 267 ++ .../apache/hadoop/hbase/quotas/QuotaTableUtil.java | 412 ++ .../org/apache/hadoop/hbase/quotas/QuotaType.java | 31 + .../hadoop/hbase/quotas/ThrottleSettings.java | 106 + .../apache/hadoop/hbase/quotas/ThrottleType.java | 34 + .../hadoop/hbase/quotas/ThrottlingException.java | 170 + .../java/org/apache/hadoop/hbase/util/Bytes.java | 18 + .../java/org/apache/hadoop/hbase/util/Sleeper.java | 7 + hbase-protocol/pom.xml | 1 + .../hbase/protobuf/generated/HBaseProtos.java | 134 +- .../hbase/protobuf/generated/MasterProtos.java | 2320 ++++++++++- .../hbase/protobuf/generated/QuotaProtos.java | 4378 ++++++++++++++++++++ hbase-protocol/src/main/protobuf/HBase.proto | 10 + hbase-protocol/src/main/protobuf/Master.proto | 18 + hbase-protocol/src/main/protobuf/Quota.proto | 73 + .../coprocessor/BaseMasterAndRegionObserver.java | 51 + .../hbase/coprocessor/BaseMasterObserver.java | 50 + .../hadoop/hbase/coprocessor/MasterObserver.java | 105 + .../org/apache/hadoop/hbase/ipc/RpcServer.java | 5 + .../hadoop/hbase/ipc/RpcServerInterface.java | 2 + .../org/apache/hadoop/hbase/master/HMaster.java | 17 + .../hadoop/hbase/master/MasterCoprocessorHost.java | 105 + .../hadoop/hbase/master/MasterRpcServices.java | 12 + .../apache/hadoop/hbase/master/MasterServices.java | 7 + .../hadoop/hbase/quotas/DefaultOperationQuota.java | 144 + .../hadoop/hbase/quotas/MasterQuotaManager.java | 426 ++ .../hadoop/hbase/quotas/NoopOperationQuota.java | 84 + .../hadoop/hbase/quotas/NoopQuotaLimiter.java | 90 + .../apache/hadoop/hbase/quotas/OperationQuota.java | 128 + .../org/apache/hadoop/hbase/quotas/QuotaCache.java | 326 ++ .../apache/hadoop/hbase/quotas/QuotaLimiter.java | 80 + .../hadoop/hbase/quotas/QuotaLimiterFactory.java | 39 + .../org/apache/hadoop/hbase/quotas/QuotaState.java | 119 + .../org/apache/hadoop/hbase/quotas/QuotaUtil.java | 311 ++ .../apache/hadoop/hbase/quotas/RateLimiter.java | 181 + .../hbase/quotas/RegionServerQuotaManager.java | 199 + .../hadoop/hbase/quotas/TimeBasedLimiter.java | 206 + .../apache/hadoop/hbase/quotas/UserQuotaState.java | 202 + .../hadoop/hbase/regionserver/HRegionServer.java | 35 + .../hadoop/hbase/regionserver/RSRpcServices.java | 70 +- .../hbase/regionserver/RegionServerServices.java | 13 + .../hbase/security/access/AccessController.java | 31 + .../hbase/util/BoundedPriorityBlockingQueue.java | 1 + .../hadoop/hbase/MockRegionServerServices.java | 12 + .../hbase/coprocessor/TestMasterObserver.java | 51 + .../hadoop/hbase/master/MockRegionServer.java | 12 + .../hadoop/hbase/master/TestCatalogJanitor.java | 6 + .../apache/hadoop/hbase/quotas/TestQuotaAdmin.java | 206 + .../apache/hadoop/hbase/quotas/TestQuotaState.java | 235 ++ 
.../hadoop/hbase/quotas/TestQuotaTableUtil.java | 184 + .../hadoop/hbase/quotas/TestQuotaThrottle.java | 422 ++ .../hadoop/hbase/quotas/TestRateLimiter.java | 114 + .../security/access/TestAccessController.java | 63 + hbase-shell/src/main/ruby/hbase.rb | 10 + hbase-shell/src/main/ruby/hbase/hbase.rb | 5 + hbase-shell/src/main/ruby/hbase/quotas.rb | 216 + hbase-shell/src/main/ruby/shell.rb | 13 + hbase-shell/src/main/ruby/shell/commands.rb | 4 + .../src/main/ruby/shell/commands/list_quotas.rb | 52 + .../src/main/ruby/shell/commands/set_quota.rb | 70 + 70 files changed, 13201 insertions(+), 239 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java create mode 100644 hbase-protocol/src/main/protobuf/Quota.proto create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java create mode 
100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java create mode 100644 hbase-shell/src/main/ruby/hbase/quotas.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/list_quotas.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/set_quota.rb diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 852ac42..6cc7686 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -41,6 +41,9 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; +import org.apache.hadoop.hbase.quotas.QuotaFilter; +import org.apache.hadoop.hbase.quotas.QuotaRetriever; +import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; @@ -1274,6 +1277,21 @@ public interface Admin extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs */ void deleteSnapshots(final Pattern pattern) throws IOException; + + /** + * Apply the new quota settings. + * @param quota the quota settings + * @throws IOException if a remote or network exception occurs + */ + void setQuota(final QuotaSettings quota) throws IOException; + + /** + * Return a QuotaRetriever to list the quotas based on the filter. 
+ * @param filter the quota settings filter + * @return the quota retriever + * @throws IOException if a remote or network exception occurs + */ + QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException; /** * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 31fe510..048ca4e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -18,9 +18,6 @@ */ package org.apache.hadoop.hbase.client; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; @@ -45,11 +42,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -162,6 +154,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanReq import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest; @@ -1994,6 +1988,12 @@ class ConnectionManager { throws ServiceException { return stub.getClusterStatus(controller, request); } + + @Override + public SetQuotaResponse setQuota(RpcController controller, SetQuotaRequest request) + throws ServiceException { + return stub.setQuota(controller, request); + } @Override public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 520b953..6195944 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -131,6 +131,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; +import org.apache.hadoop.hbase.quotas.QuotaFilter; +import org.apache.hadoop.hbase.quotas.QuotaRetriever; +import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; @@ -3616,6 +3619,33 @@ public class HBaseAdmin implements Admin { } }); } + + /** + * Apply the new quota settings. + * @param quota the quota settings + * @throws IOException if a remote or network exception occurs + */ + @Override + public void setQuota(final QuotaSettings quota) throws IOException { + executeCallable(new MasterCallable(getConnection()) { + @Override + public Void call(int callTimeout) throws ServiceException { + this.master.setQuota(null, QuotaSettings.buildSetQuotaRequestProto(quota)); + return null; + } + }); + } + + /** + * Return a Quota Scanner to list the quotas based on the filter. + * @param filter the quota settings filter + * @return the quota scanner + * @throws IOException if a remote or network exception occurs + */ + @Override + public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException { + return QuotaRetriever.open(conf, filter); + } private V executeCallable(MasterCallable callable) throws IOException { RpcRetryingCaller caller = rpcCallerFactory.newCaller(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 55311e2..94490c0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -35,6 +35,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -115,6 +116,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.WALProtos; @@ -125,6 +127,9 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescripto import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; +import org.apache.hadoop.hbase.quotas.QuotaScope; +import org.apache.hadoop.hbase.quotas.QuotaType; +import org.apache.hadoop.hbase.quotas.ThrottleType; import org.apache.hadoop.hbase.replication.ReplicationLoadSink; import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.security.access.Permission; @@ -2835,6 +2840,156 @@ public final class ProtobufUtil { } return result; } + + /** + * Convert a protocol buffer TimeUnit to a client TimeUnit + * @param proto + * @return the converted client TimeUnit + */ + public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) { + switch (proto) { + case NANOSECONDS: + return TimeUnit.NANOSECONDS; + case MICROSECONDS: + return TimeUnit.MICROSECONDS; + case 
MILLISECONDS: + return TimeUnit.MILLISECONDS; + case SECONDS: + return TimeUnit.SECONDS; + case MINUTES: + return TimeUnit.MINUTES; + case HOURS: + return TimeUnit.HOURS; + case DAYS: + return TimeUnit.DAYS; + } + throw new RuntimeException("Invalid TimeUnit " + proto); + } + + /** + * Convert a client TimeUnit to a protocol buffer TimeUnit + * @param timeUnit + * @return the converted protocol buffer TimeUnit + */ + public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) { + switch (timeUnit) { + case NANOSECONDS: + return HBaseProtos.TimeUnit.NANOSECONDS; + case MICROSECONDS: + return HBaseProtos.TimeUnit.MICROSECONDS; + case MILLISECONDS: + return HBaseProtos.TimeUnit.MILLISECONDS; + case SECONDS: + return HBaseProtos.TimeUnit.SECONDS; + case MINUTES: + return HBaseProtos.TimeUnit.MINUTES; + case HOURS: + return HBaseProtos.TimeUnit.HOURS; + case DAYS: + return HBaseProtos.TimeUnit.DAYS; + } + throw new RuntimeException("Invalid TimeUnit " + timeUnit); + } + + /** + * Convert a protocol buffer ThrottleType to a client ThrottleType + * @param proto + * @return the converted client ThrottleType + */ + public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) { + switch (proto) { + case REQUEST_NUMBER: + return ThrottleType.REQUEST_NUMBER; + case REQUEST_SIZE: + return ThrottleType.REQUEST_SIZE; + default: + throw new RuntimeException("Invalid ThrottleType " + proto); + } + } + + /** + * Convert a client ThrottleType to a protocol buffer ThrottleType + * @param type + * @return the converted protocol buffer ThrottleType + */ + public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) { + switch (type) { + case REQUEST_NUMBER: + return QuotaProtos.ThrottleType.REQUEST_NUMBER; + case REQUEST_SIZE: + return QuotaProtos.ThrottleType.REQUEST_SIZE; + } + throw new RuntimeException("Invalid ThrottleType " + type); + } + + /** + * Convert a protocol buffer QuotaScope to a client QuotaScope + * @param proto + * @return the converted client QuotaScope + */ + public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) { + switch (proto) { + case CLUSTER: + return QuotaScope.CLUSTER; + case MACHINE: + return QuotaScope.MACHINE; + } + throw new RuntimeException("Invalid QuotaScope " + proto); + } + + /** + * Convert a client QuotaScope to a protocol buffer QuotaScope + * @param scope + * @return the converted protocol buffer QuotaScope + */ + public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) { + switch (scope) { + case CLUSTER: + return QuotaProtos.QuotaScope.CLUSTER; + case MACHINE: + return QuotaProtos.QuotaScope.MACHINE; + } + throw new RuntimeException("Invalid QuotaScope " + scope); + } + + /** + * Convert a protocol buffer QuotaType to a client QuotaType + * @param proto + * @return the converted client QuotaType + */ + public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { + switch (proto) { + case THROTTLE: + return QuotaType.THROTTLE; + } + throw new RuntimeException("Invalid QuotaType " + proto); + } + + /** + * Convert a client QuotaType to a protocol buffer QuotaType + * @param type + * @return the converted protocol buffer QuotaType + */ + public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { + switch (type) { + case THROTTLE: + return QuotaProtos.QuotaType.THROTTLE; + } + throw new RuntimeException("Invalid QuotaType " + type); + } + + /** + * Build a protocol buffer TimedQuota + * @param limit the allowed number of request/data per 
timeUnit + * @param timeUnit the limit time unit + * @param scope the quota scope + * @return the protocol buffer TimedQuota + */ + public static QuotaProtos.TimedQuota toTimedQuota(final long limit, final TimeUnit timeUnit, + final QuotaScope scope) { + return QuotaProtos.TimedQuota.newBuilder().setSoftLimit(limit) + .setTimeUnit(toProtoTimeUnit(timeUnit)).setScope(toProtoQuotaScope(scope)).build(); + } /** * Generates a marker for the WAL so that we propagate the notion of a bulk region load diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java new file mode 100644 index 0000000..54a1545 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.quotas; + +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * Generic quota exceeded exception for invalid settings + */ +@InterfaceAudience.Private +public class InvalidQuotaSettingsException extends DoNotRetryIOException { + public InvalidQuotaSettingsException(String msg) { + super(msg); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java new file mode 100644 index 0000000..e0386b5 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.quotas; + +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Generic quota exceeded exception + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class QuotaExceededException extends DoNotRetryIOException { + public QuotaExceededException(String msg) { + super(msg); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java new file mode 100644 index 0000000..c3db6ee --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Strings; + +/** + * Filter to use to filter the QuotaRetriever results. 
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class QuotaFilter {
+  private Set<QuotaType> types = new HashSet<QuotaType>();
+  private boolean hasFilters = false;
+  private String namespaceRegex;
+  private String tableRegex;
+  private String userRegex;
+
+  public QuotaFilter() {
+  }
+
+  /**
+   * Set the user filter regex
+   * @param regex the user filter
+   * @return the quota filter object
+   */
+  public QuotaFilter setUserFilter(final String regex) {
+    this.userRegex = regex;
+    hasFilters |= !Strings.isEmpty(regex);
+    return this;
+  }
+
+  /**
+   * Set the table filter regex
+   * @param regex the table filter
+   * @return the quota filter object
+   */
+  public QuotaFilter setTableFilter(final String regex) {
+    this.tableRegex = regex;
+    hasFilters |= !Strings.isEmpty(regex);
+    return this;
+  }
+
+  /**
+   * Set the namespace filter regex
+   * @param regex the namespace filter
+   * @return the quota filter object
+   */
+  public QuotaFilter setNamespaceFilter(final String regex) {
+    this.namespaceRegex = regex;
+    hasFilters |= !Strings.isEmpty(regex);
+    return this;
+  }
+
+  /**
+   * Add a type to the filter list
+   * @param type the type to filter on
+   * @return the quota filter object
+   */
+  public QuotaFilter addTypeFilter(final QuotaType type) {
+    this.types.add(type);
+    hasFilters |= true;
+    return this;
+  }
+
+  /** @return true if the filter is empty */
+  public boolean isNull() {
+    return !hasFilters;
+  }
+
+  /** @return the QuotaType types that we want to filter on */
+  public Set<QuotaType> getTypeFilters() {
+    return types;
+  }
+
+  /** @return the Namespace filter regex */
+  public String getNamespaceFilter() {
+    return namespaceRegex;
+  }
+
+  /** @return the Table filter regex */
+  public String getTableFilter() {
+    return tableRegex;
+  }
+
+  /** @return the User filter regex */
+  public String getUserFilter() {
+    return userRegex;
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
new file mode 100644
index 0000000..68c8e0a
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.quotas; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Queue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.util.StringUtils; + +/** + * Scanner to iterate over the quota settings. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class QuotaRetriever implements Closeable, Iterable { + private static final Log LOG = LogFactory.getLog(QuotaRetriever.class); + + private final Queue cache = new LinkedList(); + private ResultScanner scanner; + /** + * Connection to use. + * Could pass one in and have this class use it but this class wants to be standalone. + */ + private Connection connection; + private Table table; + + private QuotaRetriever() { + } + + void init(final Configuration conf, final Scan scan) throws IOException { + this.connection = ConnectionFactory.createConnection(conf); + this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME); + try { + scanner = table.getScanner(scan); + } catch (IOException e) { + try { + close(); + } catch (IOException ioe) { + LOG.warn("Failed getting scanner and then failed close on cleanup", e); + } + throw e; + } + } + + public void close() throws IOException { + if (this.table != null) { + this.table.close(); + this.table = null; + } + if (this.connection != null) { + this.connection.close(); + this.connection = null; + } + } + + public QuotaSettings next() throws IOException { + if (cache.isEmpty()) { + Result result = scanner.next(); + if (result == null) return null; + + QuotaTableUtil.parseResult(result, new QuotaTableUtil.QuotasVisitor() { + @Override + public void visitUserQuotas(String userName, Quotas quotas) { + cache.addAll(QuotaSettingsFactory.fromUserQuotas(userName, quotas)); + } + + @Override + public void visitUserQuotas(String userName, TableName table, Quotas quotas) { + cache.addAll(QuotaSettingsFactory.fromUserQuotas(userName, table, quotas)); + } + + @Override + public void visitUserQuotas(String userName, String namespace, Quotas quotas) { + cache.addAll(QuotaSettingsFactory.fromUserQuotas(userName, namespace, quotas)); + } + + @Override + public void visitTableQuotas(TableName tableName, Quotas quotas) { + cache.addAll(QuotaSettingsFactory.fromTableQuotas(tableName, quotas)); + } + + @Override + public void visitNamespaceQuotas(String namespace, Quotas quotas) { + cache.addAll(QuotaSettingsFactory.fromNamespaceQuotas(namespace, quotas)); + } + }); + } + return cache.poll(); + } + + @Override + public Iterator iterator() { + return new Iter(); + } + + private class Iter implements Iterator { + QuotaSettings cache; + + public Iter() { + try { + cache = QuotaRetriever.this.next(); + } catch (IOException e) { + LOG.warn(StringUtils.stringifyException(e)); + } + } + + @Override + public boolean hasNext() { + return cache != 
null;
+    }
+
+    @Override
+    public QuotaSettings next() {
+      QuotaSettings result = cache;
+      try {
+        cache = QuotaRetriever.this.next();
+      } catch (IOException e) {
+        LOG.warn(StringUtils.stringifyException(e));
+      }
+      return result;
+    }
+
+    @Override
+    public void remove() {
+      throw new RuntimeException("remove() not supported");
+    }
+  }
+
+  /**
+   * Open a QuotaRetriever with no filter; all the quota settings will be returned.
+   * @param conf Configuration object to use.
+   * @return the QuotaRetriever
+   * @throws IOException if a remote or network exception occurs
+   */
+  public static QuotaRetriever open(final Configuration conf) throws IOException {
+    return open(conf, null);
+  }
+
+  /**
+   * Open a QuotaRetriever with the specified filter.
+   * @param conf Configuration object to use.
+   * @param filter the QuotaFilter
+   * @return the QuotaRetriever
+   * @throws IOException if a remote or network exception occurs
+   */
+  public static QuotaRetriever open(final Configuration conf, final QuotaFilter filter)
+      throws IOException {
+    Scan scan = QuotaTableUtil.makeScan(filter);
+    QuotaRetriever scanner = new QuotaRetriever();
+    scanner.init(conf, scan);
+    return scanner;
+  }
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java
new file mode 100644
index 0000000..2e215b6
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaScope.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Describes the scope of the quota rules.
+ * The quota can be enforced at the cluster level or at the machine level.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum QuotaScope {
+  /**
+   * The specified throttling rules will be applied at the cluster level.
+   * A limit of 100req/min means 100req/min in total.
+   * If you execute 50req on one machine and then 50req on another machine,
+   * then you have to wait for your quota to fill up.
+   */
+  CLUSTER,
+
+  /**
+   * The specified throttling rules will be applied at the machine level.
+   * A limit of 100req/min means that each machine can execute 100req/min.
+   */
+  MACHINE,
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java
new file mode 100644
index 0000000..592c4db
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettings.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class QuotaSettings {
+  private final String userName;
+  private final String namespace;
+  private final TableName tableName;
+
+  protected QuotaSettings(final String userName, final TableName tableName,
+      final String namespace) {
+    this.userName = userName;
+    this.namespace = namespace;
+    this.tableName = tableName;
+  }
+
+  public abstract QuotaType getQuotaType();
+
+  public String getUserName() {
+    return userName;
+  }
+
+  public TableName getTableName() {
+    return tableName;
+  }
+
+  public String getNamespace() {
+    return namespace;
+  }
+
+  /**
+   * Convert a QuotaSettings to a protocol buffer SetQuotaRequest.
+   * This is used internally by the Admin client to serialize the quota settings
+   * and send them to the master.
+   */
+  public static SetQuotaRequest buildSetQuotaRequestProto(final QuotaSettings settings) {
+    SetQuotaRequest.Builder builder = SetQuotaRequest.newBuilder();
+    if (settings.getUserName() != null) {
+      builder.setUserName(settings.getUserName());
+    }
+    if (settings.getTableName() != null) {
+      builder.setTableName(ProtobufUtil.toProtoTableName(settings.getTableName()));
+    }
+    if (settings.getNamespace() != null) {
+      builder.setNamespace(settings.getNamespace());
+    }
+    settings.setupSetQuotaRequest(builder);
+    return builder.build();
+  }
+
+  /**
+   * Called by buildSetQuotaRequestProto();
+   * the subclass should implement this method to set the specific SetQuotaRequest
+   * properties.
+ */ + protected abstract void setupSetQuotaRequest(SetQuotaRequest.Builder builder); + + protected String ownerToString() { + StringBuilder builder = new StringBuilder(); + if (userName != null) { + builder.append("USER => '"); + builder.append(userName); + builder.append("', "); + } + if (tableName != null) { + builder.append("TABLE => '"); + builder.append(tableName.toString()); + builder.append("', "); + } + if (namespace != null) { + builder.append("NAMESPACE => '"); + builder.append(namespace); + builder.append("', "); + } + return builder.toString(); + } + + protected static String sizeToString(final long size) { + if (size >= (1L << 50)) return String.format("%dP", size / (1L << 50)); + if (size >= (1L << 40)) return String.format("%dT", size / (1L << 40)); + if (size >= (1L << 30)) return String.format("%dG", size / (1L << 30)); + if (size >= (1L << 20)) return String.format("%dM", size / (1L << 20)); + if (size >= (1L << 10)) return String.format("%dK", size / (1L << 10)); + return String.format("%dB", size); + } + + protected static String timeToString(final TimeUnit timeUnit) { + switch (timeUnit) { + case NANOSECONDS: return "nsec"; + case MICROSECONDS: return "usec"; + case MILLISECONDS: return "msec"; + case SECONDS: return "sec"; + case MINUTES: return "min"; + case HOURS: return "hour"; + case DAYS: return "day"; + } + throw new RuntimeException("Invalid TimeUnit " + timeUnit); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java new file mode 100644 index 0000000..e29fef1 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java @@ -0,0 +1,267 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class QuotaSettingsFactory { + static class QuotaGlobalsSettingsBypass extends QuotaSettings { + private final boolean bypassGlobals; + + QuotaGlobalsSettingsBypass(final String userName, final TableName tableName, + final String namespace, final boolean bypassGlobals) { + super(userName, tableName, namespace); + this.bypassGlobals = bypassGlobals; + } + + @Override + public QuotaType getQuotaType() { + return QuotaType.GLOBAL_BYPASS; + } + + @Override + protected void setupSetQuotaRequest(SetQuotaRequest.Builder builder) { + builder.setBypassGlobals(bypassGlobals); + } + + @Override + public String toString() { + return "GLOBAL_BYPASS => " + bypassGlobals; + } + } + + /* ========================================================================== + * QuotaSettings from the Quotas object + */ + static List fromUserQuotas(final String userName, final Quotas quotas) { + return fromQuotas(userName, null, null, quotas); + } + + static List fromUserQuotas(final String userName, final TableName tableName, + final Quotas quotas) { + return fromQuotas(userName, tableName, null, quotas); + } + + static List fromUserQuotas(final String userName, final String namespace, + final Quotas quotas) { + return fromQuotas(userName, null, namespace, quotas); + } + + static List fromTableQuotas(final TableName tableName, final Quotas quotas) { + return fromQuotas(null, tableName, null, quotas); + } + + static List fromNamespaceQuotas(final String namespace, final Quotas quotas) { + return fromQuotas(null, null, namespace, quotas); + } + + private static List fromQuotas(final String userName, final TableName tableName, + final String namespace, final Quotas quotas) { + List settings = new ArrayList(); + if (quotas.hasThrottle()) { + settings.addAll(fromThrottle(userName, tableName, namespace, quotas.getThrottle())); + } + if (quotas.getBypassGlobals() == true) { + settings.add(new QuotaGlobalsSettingsBypass(userName, tableName, namespace, true)); + } + return settings; + } + + private static List fromThrottle(final String userName, final TableName tableName, + final String namespace, final QuotaProtos.Throttle throttle) { + List settings = new ArrayList(); + if (throttle.hasReqNum()) { + settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace, + ThrottleType.REQUEST_NUMBER, throttle.getReqNum())); + } + if (throttle.hasReqSize()) { + settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace, + ThrottleType.REQUEST_SIZE, throttle.getReqSize())); + } + return settings; + } + + /* ========================================================================== + * RPC Throttle + */ + + /** + * Throttle the specified user. 
+ * + * @param userName the user to throttle + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @return the quota settings + */ + public static QuotaSettings throttleUser(final String userName, final ThrottleType type, + final long limit, final TimeUnit timeUnit) { + return throttle(userName, null, null, type, limit, timeUnit); + } + + /** + * Throttle the specified user on the specified table. + * + * @param userName the user to throttle + * @param tableName the table to throttle + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @return the quota settings + */ + public static QuotaSettings throttleUser(final String userName, final TableName tableName, + final ThrottleType type, final long limit, final TimeUnit timeUnit) { + return throttle(userName, tableName, null, type, limit, timeUnit); + } + + /** + * Throttle the specified user on the specified namespace. + * + * @param userName the user to throttle + * @param namespace the namespace to throttle + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @return the quota settings + */ + public static QuotaSettings throttleUser(final String userName, final String namespace, + final ThrottleType type, final long limit, final TimeUnit timeUnit) { + return throttle(userName, null, namespace, type, limit, timeUnit); + } + + /** + * Remove the throttling for the specified user. + * + * @param userName the user + * @return the quota settings + */ + public static QuotaSettings unthrottleUser(final String userName) { + return throttle(userName, null, null, null, 0, null); + } + + /** + * Remove the throttling for the specified user on the specified table. + * + * @param userName the user + * @param tableName the table + * @return the quota settings + */ + public static QuotaSettings unthrottleUser(final String userName, final TableName tableName) { + return throttle(userName, tableName, null, null, 0, null); + } + + /** + * Remove the throttling for the specified user on the specified namespace. + * + * @param userName the user + * @param namespace the namespace + * @return the quota settings + */ + public static QuotaSettings unthrottleUser(final String userName, final String namespace) { + return throttle(userName, null, namespace, null, 0, null); + } + + /** + * Throttle the specified table. + * + * @param tableName the table to throttle + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @return the quota settings + */ + public static QuotaSettings throttleTable(final TableName tableName, final ThrottleType type, + final long limit, final TimeUnit timeUnit) { + return throttle(null, tableName, null, type, limit, timeUnit); + } + + /** + * Remove the throttling for the specified table. + * + * @param tableName the table + * @return the quota settings + */ + public static QuotaSettings unthrottleTable(final TableName tableName) { + return throttle(null, tableName, null, null, 0, null); + } + + /** + * Throttle the specified namespace. 
+ * + * @param namespace the namespace to throttle + * @param type the type of throttling + * @param limit the allowed number of request/data per timeUnit + * @param timeUnit the limit time unit + * @return the quota settings + */ + public static QuotaSettings throttleNamespace(final String namespace, final ThrottleType type, + final long limit, final TimeUnit timeUnit) { + return throttle(null, null, namespace, type, limit, timeUnit); + } + + /** + * Remove the throttling for the specified namespace. + * + * @param namespace the namespace + * @return the quota settings + */ + public static QuotaSettings unthrottleNamespace(final String namespace) { + return throttle(null, null, namespace, null, 0, null); + } + + /* Throttle helper */ + private static QuotaSettings throttle(final String userName, final TableName tableName, + final String namespace, final ThrottleType type, final long limit, + final TimeUnit timeUnit) { + QuotaProtos.ThrottleRequest.Builder builder = QuotaProtos.ThrottleRequest.newBuilder(); + if (type != null) { + builder.setType(ProtobufUtil.toProtoThrottleType(type)); + } + if (timeUnit != null) { + builder.setTimedQuota(ProtobufUtil.toTimedQuota(limit, timeUnit, QuotaScope.MACHINE)); + } + return new ThrottleSettings(userName, tableName, namespace, builder.build()); + } + + /* ========================================================================== + * Global Settings + */ + + /** + * Set the "bypass global settings" for the specified user + * + * @param userName the user to throttle + * @param bypassGlobals true if the global settings should be bypassed + * @return the quota settings + */ + public static QuotaSettings bypassGlobals(final String userName, final boolean bypassGlobals) { + return new QuotaGlobalsSettingsBypass(userName, null, null, bypassGlobals); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java new file mode 100644 index 0000000..0ad81ae --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -0,0 +1,412 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.quotas; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.QualifierFilter; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Strings; + +/** + * Helper class to interact with the quota table. + *
+ * <pre>
+ *     ROW-KEY      FAM/QUAL        DATA
+ *   n.<namespace> q:s         <global-quotas>
+ *   t.<table>     q:s         <global-quotas>
+ *   u.<user>      q:s         <global-quotas>
+ *   u.<user>      q:s.<table> <table-quotas>
+ *   u.<user>      q:s.<ns>:   <namespace-quotas>
+ * </pre>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class QuotaTableUtil {
+  private static final Log LOG = LogFactory.getLog(QuotaTableUtil.class);
+
+  /** System table for quotas */
+  public static final TableName QUOTA_TABLE_NAME =
+      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
+
+  protected static final byte[] QUOTA_FAMILY_INFO = Bytes.toBytes("q");
+  protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
+  protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
+  protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
+  protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
+  protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
+  protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
+
+  /* =========================================================================
+   *  Quota "settings" helpers
+   */
+  public static Quotas getTableQuota(final Connection connection, final TableName table)
+      throws IOException {
+    return getQuotas(connection, getTableRowKey(table));
+  }
+
+  public static Quotas getNamespaceQuota(final Connection connection, final String namespace)
+      throws IOException {
+    return getQuotas(connection, getNamespaceRowKey(namespace));
+  }
+
+  public static Quotas getUserQuota(final Connection connection, final String user)
+      throws IOException {
+    return getQuotas(connection, getUserRowKey(user));
+  }
+
+  public static Quotas getUserQuota(final Connection connection, final String user,
+      final TableName table) throws IOException {
+    return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table));
+  }
+
+  public static Quotas getUserQuota(final Connection connection, final String user,
+      final String namespace) throws IOException {
+    return getQuotas(connection, getUserRowKey(user),
+        getSettingsQualifierForUserNamespace(namespace));
+  }
+
+  private static Quotas getQuotas(final Connection connection, final byte[] rowKey)
+      throws IOException {
+    return getQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS);
+  }
+
+  private static Quotas getQuotas(final Connection connection, final byte[] rowKey,
+      final byte[] qualifier) throws IOException {
+    Get get = new Get(rowKey);
+    get.addColumn(QUOTA_FAMILY_INFO, qualifier);
+    Result result = doGet(connection, get);
+    if (result.isEmpty()) {
+      return null;
+    }
+    return quotasFromData(result.getValue(QUOTA_FAMILY_INFO, qualifier));
+  }
+
+  public static Get makeGetForTableQuotas(final TableName table) {
+    Get get = new Get(getTableRowKey(table));
+    get.addFamily(QUOTA_FAMILY_INFO);
+    return get;
+  }
+
+  public static Get makeGetForNamespaceQuotas(final String namespace) {
+    Get get = new Get(getNamespaceRowKey(namespace));
+    get.addFamily(QUOTA_FAMILY_INFO);
+    return get;
+  }
+
+  public static Get makeGetForUserQuotas(final String user, final Iterable<TableName> tables,
+      final Iterable<String> namespaces) {
+    Get get = new Get(getUserRowKey(user));
+    get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
+    for (final TableName table: tables) {
+      get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserTable(table));
+    }
+    for (final String ns: namespaces) {
+      get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserNamespace(ns));
+    }
+    return get;
+  }
+
+  public static Scan makeScan(final QuotaFilter filter) {
+    Scan scan = new Scan();
+    scan.addFamily(QUOTA_FAMILY_INFO);
+    if (filter != null &&
!filter.isNull()) { + scan.setFilter(makeFilter(filter)); + } + return scan; + } + + /** + * converts quotafilter to serializeable filterlists. + */ + public static Filter makeFilter(final QuotaFilter filter) { + FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL); + if (!Strings.isEmpty(filter.getUserFilter())) { + FilterList userFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE); + boolean hasFilter = false; + + if (!Strings.isEmpty(filter.getNamespaceFilter())) { + FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); + nsFilters.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + nsFilters.addFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, + new RegexStringComparator( + getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); + userFilters.addFilter(nsFilters); + hasFilter = true; + } + if (!Strings.isEmpty(filter.getTableFilter())) { + FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); + tableFilters.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + tableFilters.addFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, + new RegexStringComparator( + getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); + userFilters.addFilter(tableFilters); + hasFilter = true; + } + if (!hasFilter) { + userFilters.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, + new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); + } + + filterList.addFilter(userFilters); + } else if (!Strings.isEmpty(filter.getTableFilter())) { + filterList.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, + new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0))); + } else if (!Strings.isEmpty(filter.getNamespaceFilter())) { + filterList.addFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, + new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0))); + } + return filterList; + } + + public static interface UserQuotasVisitor { + void visitUserQuotas(final String userName, final Quotas quotas) + throws IOException; + void visitUserQuotas(final String userName, final TableName table, final Quotas quotas) + throws IOException; + void visitUserQuotas(final String userName, final String namespace, final Quotas quotas) + throws IOException; + } + + public static interface TableQuotasVisitor { + void visitTableQuotas(final TableName tableName, final Quotas quotas) + throws IOException; + } + + public static interface NamespaceQuotasVisitor { + void visitNamespaceQuotas(final String namespace, final Quotas quotas) + throws IOException; + } + + public static interface QuotasVisitor extends UserQuotasVisitor, + TableQuotasVisitor, NamespaceQuotasVisitor { + } + + public static void parseResult(final Result result, final QuotasVisitor visitor) + throws IOException { + byte[] row = result.getRow(); + if (isNamespaceRowKey(row)) { + parseNamespaceResult(result, visitor); + } else if (isTableRowKey(row)) { + parseTableResult(result, visitor); + } else if (isUserRowKey(row)) { + parseUserResult(result, visitor); + } else { + LOG.warn("unexpected row-key: " + Bytes.toString(row)); + } + } + + public static void parseNamespaceResult(final Result result, + final NamespaceQuotasVisitor visitor) throws IOException { + String namespace = getNamespaceFromRowKey(result.getRow()); + 
+ + public static void parseNamespaceResult(final Result result, + final NamespaceQuotasVisitor visitor) throws IOException { + String namespace = getNamespaceFromRowKey(result.getRow()); + parseNamespaceResult(namespace, result, visitor); + } + + protected static void parseNamespaceResult(final String namespace, final Result result, + final NamespaceQuotasVisitor visitor) throws IOException { + byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); + if (data != null) { + Quotas quotas = quotasFromData(data); + visitor.visitNamespaceQuotas(namespace, quotas); + } + } + + public static void parseTableResult(final Result result, final TableQuotasVisitor visitor) + throws IOException { + TableName table = getTableFromRowKey(result.getRow()); + parseTableResult(table, result, visitor); + } + + protected static void parseTableResult(final TableName table, final Result result, + final TableQuotasVisitor visitor) throws IOException { + byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); + if (data != null) { + Quotas quotas = quotasFromData(data); + visitor.visitTableQuotas(table, quotas); + } + } + + public static void parseUserResult(final Result result, final UserQuotasVisitor visitor) + throws IOException { + String userName = getUserFromRowKey(result.getRow()); + parseUserResult(userName, result, visitor); + } + + protected static void parseUserResult(final String userName, final Result result, + final UserQuotasVisitor visitor) throws IOException { + Map<byte[], byte[]> familyMap = result.getFamilyMap(QUOTA_FAMILY_INFO); + if (familyMap == null || familyMap.isEmpty()) return; + + for (Map.Entry<byte[], byte[]> entry: familyMap.entrySet()) { + Quotas quotas = quotasFromData(entry.getValue()); + if (Bytes.startsWith(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX)) { + String name = Bytes.toString(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX.length); + if (name.charAt(name.length() - 1) == TableName.NAMESPACE_DELIM) { + String namespace = name.substring(0, name.length() - 1); + visitor.visitUserQuotas(userName, namespace, quotas); + } else { + TableName table = TableName.valueOf(name); + visitor.visitUserQuotas(userName, table, quotas); + } + } else if (Bytes.equals(entry.getKey(), QUOTA_QUALIFIER_SETTINGS)) { + visitor.visitUserQuotas(userName, quotas); + } + } + } + + /* ========================================================================= + * Quotas protobuf helpers + */ + protected static Quotas quotasFromData(final byte[] data) throws IOException { + int magicLen = ProtobufUtil.lengthOfPBMagic(); + if (!ProtobufUtil.isPBMagicPrefix(data, 0, magicLen)) { + throw new IOException("Missing pb magic prefix"); + } + return Quotas.parseFrom(new ByteArrayInputStream(data, magicLen, data.length - magicLen)); + } + + protected static byte[] quotasToData(final Quotas data) throws IOException { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + stream.write(ProtobufUtil.PB_MAGIC); + data.writeTo(stream); + return stream.toByteArray(); + } + + public static boolean isEmptyQuota(final Quotas quotas) { + boolean hasSettings = false; + hasSettings |= quotas.hasThrottle(); + hasSettings |= quotas.hasBypassGlobals(); + return !hasSettings; + } + + /* ========================================================================= + * HTable helpers + */ + protected static Result doGet(final Connection connection, final Get get) + throws IOException { + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + return table.get(get); + } + } + + protected static Result[] doGet(final Connection connection, final List<Get> gets) + throws IOException { + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + return table.get(gets); + } + }
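The two protobuf helpers frame the serialized Quotas message with HBase's standard four-byte PB magic ("PBUF"), so a raw cell value can be recognized and parsed back. A round-trip sketch (both helpers are protected, so same-package access is assumed; the bypass_globals value is just an example):

  Quotas quotas = Quotas.newBuilder().setBypassGlobals(true).build();
  byte[] data = QuotaTableUtil.quotasToData(quotas);  // "PBUF" magic + message bytes
  Quotas copy = QuotaTableUtil.quotasFromData(data);  // checks the magic, parses the rest
  assert copy.getBypassGlobals();
  assert !QuotaTableUtil.isEmptyQuota(copy);          // bypass_globals counts as a setting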
+ + /* ========================================================================= + * Quota table row key helpers + */ + protected static byte[] getUserRowKey(final String user) { + return Bytes.add(QUOTA_USER_ROW_KEY_PREFIX, Bytes.toBytes(user)); + } + + protected static byte[] getTableRowKey(final TableName table) { + return Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, table.getName()); + } + + protected static byte[] getNamespaceRowKey(final String namespace) { + return Bytes.add(QUOTA_NAMESPACE_ROW_KEY_PREFIX, Bytes.toBytes(namespace)); + } + + protected static byte[] getSettingsQualifierForUserTable(final TableName tableName) { + return Bytes.add(QUOTA_QUALIFIER_SETTINGS_PREFIX, tableName.getName()); + } + + protected static byte[] getSettingsQualifierForUserNamespace(final String namespace) { + return Bytes.add(QUOTA_QUALIFIER_SETTINGS_PREFIX, + Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); + } + + protected static String getUserRowKeyRegex(final String user) { + return getRowKeyRegEx(QUOTA_USER_ROW_KEY_PREFIX, user); + } + + protected static String getTableRowKeyRegex(final String table) { + return getRowKeyRegEx(QUOTA_TABLE_ROW_KEY_PREFIX, table); + } + + protected static String getNamespaceRowKeyRegex(final String namespace) { + return getRowKeyRegEx(QUOTA_NAMESPACE_ROW_KEY_PREFIX, namespace); + } + + private static String getRowKeyRegEx(final byte[] prefix, final String regex) { + return '^' + Pattern.quote(Bytes.toString(prefix)) + regex + '$'; + } + + protected static String getSettingsQualifierRegexForUserTable(final String table) { + return '^' + Pattern.quote(Bytes.toString(QUOTA_QUALIFIER_SETTINGS_PREFIX)) + + table + "(?<!" + Pattern.quote(Character.toString(TableName.NAMESPACE_DELIM)) + ")$"; + } + + protected static String getSettingsQualifierRegexForUserNamespace(final String namespace) { + return '^' + Pattern.quote(Bytes.toString(QUOTA_QUALIFIER_SETTINGS_PREFIX)) + + namespace + Pattern.quote(Character.toString(TableName.NAMESPACE_DELIM)) + '$'; + } + + protected static boolean isNamespaceRowKey(final byte[] key) { + return Bytes.startsWith(key, QUOTA_NAMESPACE_ROW_KEY_PREFIX); + } + + protected static String getNamespaceFromRowKey(final byte[] key) { + return Bytes.toString(key, QUOTA_NAMESPACE_ROW_KEY_PREFIX.length); + } + + protected static boolean isTableRowKey(final byte[] key) { + return Bytes.startsWith(key, QUOTA_TABLE_ROW_KEY_PREFIX); + } + + protected static TableName getTableFromRowKey(final byte[] key) { + return TableName.valueOf(Bytes.toString(key, QUOTA_TABLE_ROW_KEY_PREFIX.length)); + } + + protected static boolean isUserRowKey(final byte[] key) { + return Bytes.startsWith(key, QUOTA_USER_ROW_KEY_PREFIX); + } + + protected static String getUserFromRowKey(final byte[] key) { + return Bytes.toString(key, QUOTA_USER_ROW_KEY_PREFIX.length); + } +} + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("TYPE => THROTTLE"); + if (proto.hasType()) { + builder.append(", THROTTLE_TYPE => "); + builder.append(proto.getType().toString()); + } + if (proto.hasTimedQuota()) { + QuotaProtos.TimedQuota timedQuota = proto.getTimedQuota(); + builder.append(", LIMIT => "); + if (timedQuota.hasSoftLimit()) { + switch (getThrottleType()) { + case REQUEST_NUMBER: + builder.append(String.format("%dreq", timedQuota.getSoftLimit())); + break; + case REQUEST_SIZE: + builder.append(sizeToString(timedQuota.getSoftLimit())); + break; + } + } else if (timedQuota.hasShare()) { + builder.append(String.format("%.2f%%", timedQuota.getShare())); + } + builder.append('/'); + builder.append(timeToString(ProtobufUtil.toTimeUnit(timedQuota.getTimeUnit()))); + if (timedQuota.hasScope()) { + builder.append(", SCOPE => "); + builder.append(timedQuota.getScope().toString()); + } + } else { + builder.append(", LIMIT => NONE"); + } + return builder.toString(); + } + + static ThrottleSettings fromTimedQuota(final String userName, + final TableName tableName, final String namespace, + ThrottleType type, QuotaProtos.TimedQuota timedQuota) { + QuotaProtos.ThrottleRequest.Builder builder = QuotaProtos.ThrottleRequest.newBuilder(); + builder.setType(ProtobufUtil.toProtoThrottleType(type)); + builder.setTimedQuota(timedQuota); + return new ThrottleSettings(userName, tableName, namespace, builder.build()); + } +}
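For reference, fromTimedQuota() above packs a protobuf TimedQuota into a ThrottleSettings, and toString() renders it in the human-readable LIMIT syntax. A sketch of the expected rendering (package access is assumed since the factory method is package-private; the user "bob" and the 10 req/sec limit are hypothetical):

  QuotaProtos.TimedQuota timedQuota = QuotaProtos.TimedQuota.newBuilder()
      .setSoftLimit(10)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS)
      .build();
  ThrottleSettings settings = ThrottleSettings.fromTimedQuota(
      "bob", null, null, ThrottleType.REQUEST_NUMBER, timedQuota);
  // expected: TYPE => THROTTLE, THROTTLE_TYPE => REQUEST_NUMBER, LIMIT => 10req/sec
  System.out.println(settings);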
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java new file mode 100644 index 0000000..bb5c093 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Describe the Throttle Type. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum ThrottleType { + /** Throttling based on the number of requests per time-unit */ + REQUEST_NUMBER, + + /** Throttling based on the read+write data size */ + REQUEST_SIZE, +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java new file mode 100644 index 0000000..dad1edd --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java @@ -0,0 +1,170 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.quotas; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Describe the throttling result. + * + * TODO: At some point this will be handled on the client side to prevent + * the operation from going to the server when the waitInterval is greater + * than the one returned by this exception. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ThrottlingException extends QuotaExceededException { + private static final long serialVersionUID = 1406576492085155743L; + + private static final Log LOG = LogFactory.getLog(ThrottlingException.class); + + @InterfaceAudience.Public + @InterfaceStability.Evolving + public enum Type { + NumRequestsExceeded, + NumReadRequestsExceeded, + NumWriteRequestsExceeded, + WriteSizeExceeded, + ReadSizeExceeded, + } + + private static final String[] MSG_TYPE = new String[] { + "number of requests exceeded", + "number of read requests exceeded", + "number of write requests exceeded", + "write size limit exceeded", + "read size limit exceeded", + }; + + private static final String MSG_WAIT = " - wait "; + + private long waitInterval; + private Type type; + + public ThrottlingException(String msg) { + super(msg); + + // Dirty workaround to get the information after + // ((RemoteException)e.getCause()).unwrapRemoteException() + for (int i = 0; i < MSG_TYPE.length; ++i) { + int index = msg.indexOf(MSG_TYPE[i]); + if (index >= 0) { + String waitTimeStr = msg.substring(index + MSG_TYPE[i].length() + MSG_WAIT.length()); + type = Type.values()[i]; + waitInterval = timeFromString(waitTimeStr); + break; + } + } + } + + public ThrottlingException(final Type type, final long waitInterval, final String msg) { + super(msg); + this.waitInterval = waitInterval; + this.type = type; + } + + public Type getType() { + return this.type; + } + + public long getWaitInterval() { + return this.waitInterval; + } + + public static void throwNumRequestsExceeded(final long waitInterval) + throws ThrottlingException { + throwThrottlingException(Type.NumRequestsExceeded, waitInterval); + } + + public static void throwNumReadRequestsExceeded(final long waitInterval) + throws ThrottlingException { + throwThrottlingException(Type.NumReadRequestsExceeded, waitInterval); + } + + public static void throwNumWriteRequestsExceeded(final long waitInterval) + throws ThrottlingException { + throwThrottlingException(Type.NumWriteRequestsExceeded, waitInterval); + } + + public static void throwWriteSizeExceeded(final long waitInterval) + throws ThrottlingException { + throwThrottlingException(Type.WriteSizeExceeded, waitInterval); + } + + public static void throwReadSizeExceeded(final long waitInterval) + throws ThrottlingException { + throwThrottlingException(Type.ReadSizeExceeded, waitInterval); + } + + private static void throwThrottlingException(final Type type, final long waitInterval) + throws ThrottlingException { + String msg = MSG_TYPE[type.ordinal()] + MSG_WAIT + formatTime(waitInterval); + throw new ThrottlingException(type, waitInterval, msg); + } + + public static String formatTime(long timeDiff) { + StringBuilder buf = new StringBuilder(); + long hours = timeDiff / (60*60*1000); + long rem = (timeDiff % (60*60*1000)); + long minutes = rem / (60*1000); + rem = rem % (60*1000); + float seconds = rem / 1000.0f; + + if (hours != 0){ + buf.append(hours); + buf.append("hrs, "); + } + if (minutes != 0){ + buf.append(minutes); + buf.append("mins, "); + } + buf.append(String.format("%.2fsec", seconds)); + return buf.toString(); + }
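formatTime() above and timeFromString() below are meant to be inverses of each other, so the wait interval encoded in the exception message can be recovered after the RemoteException round-trip noted in the constructor. A worked example (timeFromString() is private; shown here as if it were accessible):

  long wait = 2 * 60 * 1000 + 5 * 1000;                // 125000 ms
  String text = ThrottlingException.formatTime(wait);  // "2mins, 5.00sec"
  long parsed = timeFromString(text);                  // matches the mins/sec pattern
  assert parsed == wait;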
+ + private static long timeFromString(String timeDiff) { + Pattern[] patterns = new Pattern[] { + Pattern.compile("^(\\d+\\.\\d\\d)sec"), + Pattern.compile("^(\\d+)mins, (\\d+\\.\\d\\d)sec"), + Pattern.compile("^(\\d+)hrs, (\\d+)mins, (\\d+\\.\\d\\d)sec") + }; + + for (int i = 0; i < patterns.length; ++i) { + Matcher m =
patterns[i].matcher(timeDiff); + if (m.find()) { + long time = Math.round(Float.parseFloat(m.group(1 + i)) * 1000); + if (i > 0) { + time += Long.parseLong(m.group(i)) * (60 * 1000); + } + if (i > 1) { + time += Long.parseLong(m.group(i - 1)) * (60 * 60 * 1000); + } + return time; + } + } + + return -1; + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index dd666e6..f3d226e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -362,6 +362,24 @@ public class Bytes { final byte [] b2) { return toString(b1, 0, b1.length) + sep + toString(b2, 0, b2.length); } + + /** + * This method will convert utf8 encoded bytes into a string. If the given byte array is null, + * this method will return null. + * @param b Presumed UTF-8 encoded byte array. + * @param off offset into array + * @return String made from b or null + */ + public static String toString(final byte[] b, int off) { + if (b == null) { + return null; + } + int len = b.length - off; + if (len <= 0) { + return ""; + } + return new String(b, off, len, UTF8_CHARSET); + } /** * This method will convert utf8 encoded bytes into a string. If diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java index 071250b..416ef02 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java @@ -114,4 +114,11 @@ public class Sleeper { triggerWake = false; } } + + /** + * @return the sleep period in milliseconds + */ + public final int getPeriod() { + return period; + } } diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index 3b0356d..08b379c 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -176,6 +176,7 @@ MapReduce.proto Master.proto MultiRowMutation.proto + Quota.proto RegionServerStatus.proto RowProcessor.proto RPC.proto diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index 1dbce4d..5b6b0ea 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -139,6 +139,133 @@ public final class HBaseProtos { // @@protoc_insertion_point(enum_scope:CompareType) } + /** + * Protobuf enum {@code TimeUnit} + */ + public enum TimeUnit + implements com.google.protobuf.ProtocolMessageEnum { + /** + * NANOSECONDS = 1; + */ + NANOSECONDS(0, 1), + /** + * MICROSECONDS = 2; + */ + MICROSECONDS(1, 2), + /** + * MILLISECONDS = 3; + */ + MILLISECONDS(2, 3), + /** + * SECONDS = 4; + */ + SECONDS(3, 4), + /** + * MINUTES = 5; + */ + MINUTES(4, 5), + /** + * HOURS = 6; + */ + HOURS(5, 6), + /** + * DAYS = 7; + */ + DAYS(6, 7), + ; + + /** + * NANOSECONDS = 1; + */ + public static final int NANOSECONDS_VALUE = 1; + /** + * MICROSECONDS = 2; + */ + public static final int MICROSECONDS_VALUE = 2; + /** + * MILLISECONDS = 3; + */ + public static final int MILLISECONDS_VALUE = 3; + /** + * SECONDS = 4; + */ + public static final int SECONDS_VALUE = 4; + /** + * MINUTES = 5; + */ + public static final int MINUTES_VALUE = 5; + /** + * HOURS = 6; + */ + public static final int 
HOURS_VALUE = 6; + /** + * DAYS = 7; + */ + public static final int DAYS_VALUE = 7; + + + public final int getNumber() { return value; } + + public static TimeUnit valueOf(int value) { + switch (value) { + case 1: return NANOSECONDS; + case 2: return MICROSECONDS; + case 3: return MILLISECONDS; + case 4: return SECONDS; + case 5: return MINUTES; + case 6: return HOURS; + case 7: return DAYS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public TimeUnit findValueByNumber(int number) { + return TimeUnit.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final TimeUnit[] VALUES = values(); + + public static TimeUnit valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private TimeUnit(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:TimeUnit) + } + public interface TableNameOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -16359,8 +16486,11 @@ public final class HBaseProtos { "t\030\001 \001(\005*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS" + "_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n" + "\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_O" + - "P\020\006B>\n*org.apache.hadoop.hbase.protobuf." 
+ - "generatedB\013HBaseProtosH\001\240\001\001" + "P\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICR" + + "OSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020" + + "\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*o" + + "rg.apache.hadoop.hbase.protobuf.generate" + + "dB\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 9150e43..aabbc8e 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -41475,6 +41475,1789 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:IsProcedureDoneResponse) } + public interface SetQuotaRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string user_name = 1; + /** + * optional string user_name = 1; + */ + boolean hasUserName(); + /** + * optional string user_name = 1; + */ + java.lang.String getUserName(); + /** + * optional string user_name = 1; + */ + com.google.protobuf.ByteString + getUserNameBytes(); + + // optional string user_group = 2; + /** + * optional string user_group = 2; + */ + boolean hasUserGroup(); + /** + * optional string user_group = 2; + */ + java.lang.String getUserGroup(); + /** + * optional string user_group = 2; + */ + com.google.protobuf.ByteString + getUserGroupBytes(); + + // optional string namespace = 3; + /** + * optional string namespace = 3; + */ + boolean hasNamespace(); + /** + * optional string namespace = 3; + */ + java.lang.String getNamespace(); + /** + * optional string namespace = 3; + */ + com.google.protobuf.ByteString + getNamespaceBytes(); + + // optional .TableName table_name = 4; + /** + * optional .TableName table_name = 4; + */ + boolean hasTableName(); + /** + * optional .TableName table_name = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * optional .TableName table_name = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // optional bool remove_all = 5; + /** + * optional bool remove_all = 5; + */ + boolean hasRemoveAll(); + /** + * optional bool remove_all = 5; + */ + boolean getRemoveAll(); + + // optional bool bypass_globals = 6; + /** + * optional bool bypass_globals = 6; + */ + boolean hasBypassGlobals(); + /** + * optional bool bypass_globals = 6; + */ + boolean getBypassGlobals(); + + // optional .ThrottleRequest throttle = 7; + /** + * optional .ThrottleRequest throttle = 7; + */ + boolean hasThrottle(); + /** + * optional .ThrottleRequest throttle = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle(); + /** + * optional .ThrottleRequest throttle = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder(); + } + /** + * Protobuf type {@code SetQuotaRequest} + */ + public static final class SetQuotaRequest extends + com.google.protobuf.GeneratedMessage + implements SetQuotaRequestOrBuilder { + // Use SetQuotaRequest.newBuilder() to construct. 
+ private SetQuotaRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SetQuotaRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SetQuotaRequest defaultInstance; + public static SetQuotaRequest getDefaultInstance() { + return defaultInstance; + } + + public SetQuotaRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SetQuotaRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + userName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + userGroup_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + namespace_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 40: { + bitField0_ |= 0x00000010; + removeAll_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + bypassGlobals_ = input.readBool(); + break; + } + case 58: { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + subBuilder = throttle_.toBuilder(); + } + throttle_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(throttle_); + throttle_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000040; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SetQuotaRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetQuotaRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string user_name = 1; + public static final int USER_NAME_FIELD_NUMBER = 1; + private java.lang.Object userName_; + /** + * optional string user_name = 1; + */ + public boolean hasUserName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string user_name = 1; + */ + public java.lang.String getUserName() { + java.lang.Object ref = userName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + userName_ = s; + } + return s; + } + } + /** + * optional string user_name = 1; + */ + public com.google.protobuf.ByteString + getUserNameBytes() { + java.lang.Object ref = userName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string user_group = 2; + public static final int USER_GROUP_FIELD_NUMBER = 2; + private java.lang.Object userGroup_; + /** + * optional string user_group = 2; + */ + public boolean hasUserGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string user_group = 2; + */ + public java.lang.String getUserGroup() { + java.lang.Object ref = userGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + userGroup_ = s; + } + return s; + } + } + /** + * optional string user_group = 2; + */ + public com.google.protobuf.ByteString + getUserGroupBytes() { + java.lang.Object ref = userGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string namespace = 3; + public static final int NAMESPACE_FIELD_NUMBER = 3; + private java.lang.Object namespace_; + /** + * optional string namespace = 3; + */ + public boolean hasNamespace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string namespace = 3; + */ + public java.lang.String getNamespace() { + java.lang.Object ref = namespace_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + namespace_ = s; + } + return s; + } + } + /** + * optional string namespace = 3; + */ + public com.google.protobuf.ByteString + 
getNamespaceBytes() { + java.lang.Object ref = namespace_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + namespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .TableName table_name = 4; + public static final int TABLE_NAME_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * optional .TableName table_name = 4; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * optional .TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // optional bool remove_all = 5; + public static final int REMOVE_ALL_FIELD_NUMBER = 5; + private boolean removeAll_; + /** + * optional bool remove_all = 5; + */ + public boolean hasRemoveAll() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool remove_all = 5; + */ + public boolean getRemoveAll() { + return removeAll_; + } + + // optional bool bypass_globals = 6; + public static final int BYPASS_GLOBALS_FIELD_NUMBER = 6; + private boolean bypassGlobals_; + /** + * optional bool bypass_globals = 6; + */ + public boolean hasBypassGlobals() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool bypass_globals = 6; + */ + public boolean getBypassGlobals() { + return bypassGlobals_; + } + + // optional .ThrottleRequest throttle = 7; + public static final int THROTTLE_FIELD_NUMBER = 7; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_; + /** + * optional .ThrottleRequest throttle = 7; + */ + public boolean hasThrottle() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .ThrottleRequest throttle = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() { + return throttle_; + } + /** + * optional .ThrottleRequest throttle = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() { + return throttle_; + } + + private void initFields() { + userName_ = ""; + userGroup_ = ""; + namespace_ = ""; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + removeAll_ = false; + bypassGlobals_ = false; + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasThrottle()) { + if (!getThrottle().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getUserNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + 
output.writeBytes(2, getUserGroupBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getNamespaceBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, tableName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(5, removeAll_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBool(6, bypassGlobals_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeMessage(7, throttle_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getUserNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUserGroupBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getNamespaceBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, removeAll_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, bypassGlobals_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, throttle_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) obj; + + boolean result = true; + result = result && (hasUserName() == other.hasUserName()); + if (hasUserName()) { + result = result && getUserName() + .equals(other.getUserName()); + } + result = result && (hasUserGroup() == other.hasUserGroup()); + if (hasUserGroup()) { + result = result && getUserGroup() + .equals(other.getUserGroup()); + } + result = result && (hasNamespace() == other.hasNamespace()); + if (hasNamespace()) { + result = result && getNamespace() + .equals(other.getNamespace()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasRemoveAll() == other.hasRemoveAll()); + if (hasRemoveAll()) { + result = result && (getRemoveAll() + == other.getRemoveAll()); + } + result = result && (hasBypassGlobals() == other.hasBypassGlobals()); + if (hasBypassGlobals()) { + result = result && (getBypassGlobals() + == other.getBypassGlobals()); + } + result = result && (hasThrottle() == other.hasThrottle()); + if (hasThrottle()) { + result = result && getThrottle() + 
.equals(other.getThrottle()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserName()) { + hash = (37 * hash) + USER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getUserName().hashCode(); + } + if (hasUserGroup()) { + hash = (37 * hash) + USER_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getUserGroup().hashCode(); + } + if (hasNamespace()) { + hash = (37 * hash) + NAMESPACE_FIELD_NUMBER; + hash = (53 * hash) + getNamespace().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasRemoveAll()) { + hash = (37 * hash) + REMOVE_ALL_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getRemoveAll()); + } + if (hasBypassGlobals()) { + hash = (37 * hash) + BYPASS_GLOBALS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBypassGlobals()); + } + if (hasThrottle()) { + hash = (37 * hash) + THROTTLE_FIELD_NUMBER; + hash = (53 * hash) + getThrottle().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws 
java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SetQuotaRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + getThrottleFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + userName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + userGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + namespace_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + removeAll_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + bypassGlobals_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + if (throttleBuilder_ == null) { + throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance(); + } else { + throttleBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest getDefaultInstanceForType() { 
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.userName_ = userName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.userGroup_ = userGroup_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.namespace_ = namespace_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.removeAll_ = removeAll_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.bypassGlobals_ = bypassGlobals_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + if (throttleBuilder_ == null) { + result.throttle_ = throttle_; + } else { + result.throttle_ = throttleBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance()) return this; + if (other.hasUserName()) { + bitField0_ |= 0x00000001; + userName_ = other.userName_; + onChanged(); + } + if (other.hasUserGroup()) { + bitField0_ |= 0x00000002; + userGroup_ = other.userGroup_; + onChanged(); + } + if (other.hasNamespace()) { + bitField0_ |= 0x00000004; + namespace_ = other.namespace_; + onChanged(); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasRemoveAll()) { + setRemoveAll(other.getRemoveAll()); + } + if (other.hasBypassGlobals()) { + setBypassGlobals(other.getBypassGlobals()); + } + if (other.hasThrottle()) { + mergeThrottle(other.getThrottle()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } + if (hasThrottle()) { + if (!getThrottle().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string user_name = 1; + private java.lang.Object userName_ = ""; + /** + * optional string user_name = 1; + */ + public boolean hasUserName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string user_name = 1; + */ + public java.lang.String getUserName() { + java.lang.Object ref = userName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + userName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string user_name = 1; + */ + public com.google.protobuf.ByteString + getUserNameBytes() { + java.lang.Object ref = userName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string user_name = 1; + */ + public Builder setUserName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + userName_ = value; + onChanged(); + return this; + } + /** + * optional string user_name = 1; + */ + public Builder clearUserName() { + bitField0_ = (bitField0_ & ~0x00000001); + userName_ = getDefaultInstance().getUserName(); + onChanged(); + return this; + } + /** + * optional string user_name = 1; + */ + public Builder setUserNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + userName_ = value; + onChanged(); + return this; + } + + // optional string user_group = 2; + private java.lang.Object userGroup_ = ""; + /** + * optional string user_group = 2; + */ + public boolean hasUserGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string user_group = 2; + */ + public java.lang.String getUserGroup() { + java.lang.Object ref = userGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + userGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string user_group = 2; + */ + public com.google.protobuf.ByteString + getUserGroupBytes() { + java.lang.Object ref = userGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + userGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string user_group = 2; + */ + public Builder setUserGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + userGroup_ = value; + onChanged(); + return this; + } + /** + * optional string user_group = 2; + */ + public Builder clearUserGroup() { + bitField0_ = (bitField0_ & ~0x00000002); + userGroup_ = getDefaultInstance().getUserGroup(); + onChanged(); + return this; + } + 
/** + * optional string user_group = 2; + */ + public Builder setUserGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + userGroup_ = value; + onChanged(); + return this; + } + + // optional string namespace = 3; + private java.lang.Object namespace_ = ""; + /** + * optional string namespace = 3; + */ + public boolean hasNamespace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string namespace = 3; + */ + public java.lang.String getNamespace() { + java.lang.Object ref = namespace_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + namespace_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string namespace = 3; + */ + public com.google.protobuf.ByteString + getNamespaceBytes() { + java.lang.Object ref = namespace_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + namespace_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string namespace = 3; + */ + public Builder setNamespace( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + namespace_ = value; + onChanged(); + return this; + } + /** + * optional string namespace = 3; + */ + public Builder clearNamespace() { + bitField0_ = (bitField0_ & ~0x00000004); + namespace_ = getDefaultInstance().getNamespace(); + onChanged(); + return this; + } + /** + * optional string namespace = 3; + */ + public Builder setNamespaceBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + namespace_ = value; + onChanged(); + return this; + } + + // optional .TableName table_name = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * optional .TableName table_name = 4; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * optional .TableName table_name = 4; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableName table_name = 4; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + 
tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableName table_name = 4; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableName table_name = 4; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * optional .TableName table_name = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * optional .TableName table_name = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // optional bool remove_all = 5; + private boolean removeAll_ ; + /** + * optional bool remove_all = 5; + */ + public boolean hasRemoveAll() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool remove_all = 5; + */ + public boolean getRemoveAll() { + return removeAll_; + } + /** + * optional bool remove_all = 5; + */ + public Builder setRemoveAll(boolean value) { + bitField0_ |= 0x00000010; + removeAll_ = value; + onChanged(); + return this; + } + /** + * optional bool remove_all = 5; + */ + public Builder clearRemoveAll() { + bitField0_ = (bitField0_ & ~0x00000010); + removeAll_ = false; + onChanged(); + return this; + } + + // optional bool bypass_globals = 6; + private boolean bypassGlobals_ ; + /** + * optional bool bypass_globals = 6; + */ + public boolean hasBypassGlobals() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool bypass_globals = 6; + */ + public boolean getBypassGlobals() { + return bypassGlobals_; + } + /** + * optional bool bypass_globals = 6; + */ + public Builder setBypassGlobals(boolean value) { + bitField0_ |= 0x00000020; + bypassGlobals_ = 
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool bypass_globals = 6;</code>
+       */
+      public Builder clearBypassGlobals() {
+        bitField0_ = (bitField0_ & ~0x00000020);
+        bypassGlobals_ = false;
+        onChanged();
+        return this;
+      }
+
+      // optional .ThrottleRequest throttle = 7;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder> throttleBuilder_;
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public boolean hasThrottle() {
+        return ((bitField0_ & 0x00000040) == 0x00000040);
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getThrottle() {
+        if (throttleBuilder_ == null) {
+          return throttle_;
+        } else {
+          return throttleBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public Builder setThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) {
+        if (throttleBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          throttle_ = value;
+          onChanged();
+        } else {
+          throttleBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public Builder setThrottle(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder builderForValue) {
+        if (throttleBuilder_ == null) {
+          throttle_ = builderForValue.build();
+          onChanged();
+        } else {
+          throttleBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public Builder mergeThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest value) {
+        if (throttleBuilder_ == null) {
+          if (((bitField0_ & 0x00000040) == 0x00000040) &&
+              throttle_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance()) {
+            throttle_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.newBuilder(throttle_).mergeFrom(value).buildPartial();
+          } else {
+            throttle_ = value;
+          }
+          onChanged();
+        } else {
+          throttleBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public Builder clearThrottle() {
+        if (throttleBuilder_ == null) {
+          throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance();
+          onChanged();
+        } else {
+          throttleBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000040);
+        return this;
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder getThrottleBuilder() {
+        bitField0_ |= 0x00000040;
+        onChanged();
+        return getThrottleFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder() {
+        if (throttleBuilder_ != null) {
+          return throttleBuilder_.getMessageOrBuilder();
+        } else {
+          return throttle_;
+        }
+      }
+      /**
+       * <code>optional .ThrottleRequest throttle = 7;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder>
+          getThrottleFieldBuilder() {
+        if (throttleBuilder_ == null) {
+          throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder>(
+                  throttle_,
+                  getParentForChildren(),
+                  isClean());
+          throttle_ = null;
+        }
+        return throttleBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:SetQuotaRequest)
+    }
+
+    static {
+      defaultInstance = new SetQuotaRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:SetQuotaRequest)
+  }
+
+  public interface SetQuotaResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code SetQuotaResponse}
+   */
+  public static final class SetQuotaResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements SetQuotaResponseOrBuilder {
+    // Use SetQuotaResponse.newBuilder() to construct.
+    private SetQuotaResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SetQuotaResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SetQuotaResponse defaultInstance;
+    public static SetQuotaResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SetQuotaResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SetQuotaResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class,
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SetQuotaResponse> PARSER =
+        new com.google.protobuf.AbstractParser<SetQuotaResponse>() {
+      public SetQuotaResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new SetQuotaResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<SetQuotaResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code SetQuotaResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+        implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetQuotaResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:SetQuotaResponse)
+    }
+
+    static {
+      defaultInstance = new SetQuotaResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:SetQuotaResponse)
+  }
+
   public interface MajorCompactionTimestampRequestOrBuilder
     extends com.google.protobuf.MessageOrBuilder {
@@ -43590,6 +45373,18 @@ public final class MasterProtos {
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse> done);
 
       /**
+       * <code>rpc SetQuota(.SetQuotaRequest) returns (.SetQuotaResponse);</code>
+       *
+       * <pre>
+       ** Apply the new quota settings 
+       * </pre>
+       */
+      public abstract void setQuota(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse> done);
+
+      /**
        * <code>rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);</code>
        *
        * <pre>
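
The hunk above adds the callback flavor of the new RPC. For orientation, a
minimal sketch of the message a caller would build for it -- the user name
"alice" and the limit are illustrative only, and the ThrottleRequest/TimedQuota
field names are assumed from the Quota.proto added by this patch:

    // Throttle user "alice" to 10 requests per second (illustrative values).
    MasterProtos.SetQuotaRequest req = MasterProtos.SetQuotaRequest.newBuilder()
        .setUserName("alice")                                  // quota subject
        .setThrottle(QuotaProtos.ThrottleRequest.newBuilder()
            .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER)  // count-based throttle
            .setTimedQuota(QuotaProtos.TimedQuota.newBuilder()
                .setSoftLimit(10)                              // 10 requests ...
                .setTimeUnit(HBaseProtos.TimeUnit.SECONDS)     // ... per second
                .build())
            .build())
        .build();

Only one of user_name, user_group, namespace, or table_name is expected to be
set per request, matching the SetQuotaRequest fields declared earlier.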
@@ -43963,6 +45758,14 @@ public final class MasterProtos {
         }
 
         @java.lang.Override
+        public  void setQuota(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse> done) {
+          impl.setQuota(controller, request, done);
+        }
+
+        @java.lang.Override
         public  void getLastMajorCompactionTimestamp(
             com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
@@ -44087,8 +45890,10 @@ public final class MasterProtos {
             case 42:
               return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
             case 43:
-              return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
+              return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
             case 44:
+              return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
+            case 45:
               return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -44191,8 +45996,10 @@ public final class MasterProtos {
             case 42:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
             case 43:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
             case 44:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+            case 45:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -44295,9 +46102,11 @@ public final class MasterProtos {
             case 42:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
             case 43:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
             case 44:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+            case 45:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
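
The three reflective dispatch tables above must stay in lockstep: SetQuota
takes method index 43, and the two compaction-timestamp methods shift to 44
and 45. A hypothetical sanity check against the generated service descriptor
(method order follows the rpc declarations in Master.proto):

    com.google.protobuf.Descriptors.ServiceDescriptor svc =
        MasterProtos.MasterService.getDescriptor();
    assert svc.getMethods().get(43).getName().equals("SetQuota");
    assert svc.getMethods().get(44).getName().equals("getLastMajorCompactionTimestamp");
    assert svc.getMethods().get(45).getName().equals("getLastMajorCompactionTimestampForRegion");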
@@ -44849,6 +46658,18 @@ public final class MasterProtos {
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse> done);
 
     /**
+     * <code>rpc SetQuota(.SetQuotaRequest) returns (.SetQuotaResponse);</code>
+     *
+     * <pre>
+     ** Apply the new quota settings 
+     * </pre>
+     */
+    public abstract void setQuota(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse> done);
+
+    /**
      * <code>rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);</code>
      *
      * <pre>
@@ -45110,11 +46931,16 @@ public final class MasterProtos {
               done));
           return;
         case 43:
+          this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse>specializeCallback(
+              done));
+          return;
+        case 44:
           this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
               done));
           return;
-        case 44:
+        case 45:
           this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
               done));
@@ -45220,8 +47046,10 @@ public final class MasterProtos {
         case 42:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
         case 43:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
         case 44:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+        case 45:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -45324,9 +47152,11 @@ public final class MasterProtos {
         case 42:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
         case 43:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
         case 44:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+        case 45:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -45993,12 +47823,27 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()));
       }
 
+      public  void setQuota(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(43),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()));
+      }
+
       public  void getLastMajorCompactionTimestamp(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(43),
+          getDescriptor().getMethods().get(44),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
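
A short sketch of driving the non-blocking stub added above; "channel" is an
assumed com.google.protobuf.RpcChannel already connected to the active master,
and the remove-all request for user "alice" is illustrative:

    MasterProtos.MasterService.Stub stub =
        MasterProtos.MasterService.newStub(channel);
    stub.setQuota(null,
        MasterProtos.SetQuotaRequest.newBuilder()
            .setUserName("alice")
            .setRemoveAll(true)   // drop every quota set for the user
            .build(),
        new com.google.protobuf.RpcCallback<MasterProtos.SetQuotaResponse>() {
          @Override
          public void run(MasterProtos.SetQuotaResponse response) {
            // SetQuotaResponse carries no fields; its arrival means the
            // master accepted and applied the settings.
          }
        });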
@@ -46013,7 +47858,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(44),
+          getDescriptor().getMethods().get(45),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
@@ -46245,6 +48090,11 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
           throws com.google.protobuf.ServiceException;
 
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
+          throws com.google.protobuf.ServiceException;
+
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
@@ -46779,12 +48629,24 @@ public final class MasterProtos {
       }
 
 
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse setQuota(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(43),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance());
+      }
+
+
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(43),
+          getDescriptor().getMethods().get(44),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -46796,7 +48658,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(44),
+          getDescriptor().getMethods().get(45),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -47218,6 +49080,16 @@ public final class MasterProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_IsProcedureDoneResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_SetQuotaRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_SetQuotaRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_SetQuotaResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_SetQuotaResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_MajorCompactionTimestampRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -47242,207 +49114,214 @@ public final class MasterProtos {
   static {
     java.lang.String[] descriptorData = {
       "\n\014Master.proto\032\013HBase.proto\032\014Client.prot" +
-      "o\032\023ClusterStatus.proto\"`\n\020AddColumnReque" +
-      "st\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022,\n\017co" +
-      "lumn_families\030\002 \002(\0132\023.ColumnFamilySchema" +
-      "\"\023\n\021AddColumnResponse\"J\n\023DeleteColumnReq" +
-      "uest\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022\023\n\013" +
-      "column_name\030\002 \002(\014\"\026\n\024DeleteColumnRespons" +
-      "e\"c\n\023ModifyColumnRequest\022\036\n\ntable_name\030\001" +
-      " \002(\0132\n.TableName\022,\n\017column_families\030\002 \002(" +
-      "\0132\023.ColumnFamilySchema\"\026\n\024ModifyColumnRe",
-      "sponse\"\\\n\021MoveRegionRequest\022 \n\006region\030\001 " +
-      "\002(\0132\020.RegionSpecifier\022%\n\020dest_server_nam" +
-      "e\030\002 \001(\0132\013.ServerName\"\024\n\022MoveRegionRespon" +
-      "se\"\200\001\n\035DispatchMergingRegionsRequest\022\"\n\010" +
-      "region_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010regi" +
-      "on_b\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcible" +
-      "\030\003 \001(\010:\005false\" \n\036DispatchMergingRegionsR" +
-      "esponse\"7\n\023AssignRegionRequest\022 \n\006region" +
-      "\030\001 \002(\0132\020.RegionSpecifier\"\026\n\024AssignRegion" +
-      "Response\"O\n\025UnassignRegionRequest\022 \n\006reg",
-      "ion\030\001 \002(\0132\020.RegionSpecifier\022\024\n\005force\030\002 \001" +
-      "(\010:\005false\"\030\n\026UnassignRegionResponse\"8\n\024O" +
-      "fflineRegionRequest\022 \n\006region\030\001 \002(\0132\020.Re" +
-      "gionSpecifier\"\027\n\025OfflineRegionResponse\"L" +
-      "\n\022CreateTableRequest\022\"\n\014table_schema\030\001 \002" +
-      "(\0132\014.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\"\025\n\023" +
-      "CreateTableResponse\"4\n\022DeleteTableReques" +
-      "t\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\025\n\023Del" +
-      "eteTableResponse\"T\n\024TruncateTableRequest" +
-      "\022\035\n\ttableName\030\001 \002(\0132\n.TableName\022\035\n\016prese",
-      "rveSplits\030\002 \001(\010:\005false\"\027\n\025TruncateTableR" +
-      "esponse\"4\n\022EnableTableRequest\022\036\n\ntable_n" +
-      "ame\030\001 \002(\0132\n.TableName\"\025\n\023EnableTableResp" +
-      "onse\"5\n\023DisableTableRequest\022\036\n\ntable_nam" +
-      "e\030\001 \002(\0132\n.TableName\"\026\n\024DisableTableRespo" +
-      "nse\"X\n\022ModifyTableRequest\022\036\n\ntable_name\030" +
-      "\001 \002(\0132\n.TableName\022\"\n\014table_schema\030\002 \002(\0132" +
-      "\014.TableSchema\"\025\n\023ModifyTableResponse\"K\n\026" +
-      "CreateNamespaceRequest\0221\n\023namespaceDescr" +
-      "iptor\030\001 \002(\0132\024.NamespaceDescriptor\"\031\n\027Cre",
-      "ateNamespaceResponse\"/\n\026DeleteNamespaceR" +
-      "equest\022\025\n\rnamespaceName\030\001 \002(\t\"\031\n\027DeleteN" +
-      "amespaceResponse\"K\n\026ModifyNamespaceReque" +
-      "st\0221\n\023namespaceDescriptor\030\001 \002(\0132\024.Namesp" +
-      "aceDescriptor\"\031\n\027ModifyNamespaceResponse" +
-      "\"6\n\035GetNamespaceDescriptorRequest\022\025\n\rnam" +
-      "espaceName\030\001 \002(\t\"S\n\036GetNamespaceDescript" +
-      "orResponse\0221\n\023namespaceDescriptor\030\001 \002(\0132" +
-      "\024.NamespaceDescriptor\"!\n\037ListNamespaceDe" +
-      "scriptorsRequest\"U\n ListNamespaceDescrip",
-      "torsResponse\0221\n\023namespaceDescriptor\030\001 \003(" +
-      "\0132\024.NamespaceDescriptor\"?\n&ListTableDesc" +
-      "riptorsByNamespaceRequest\022\025\n\rnamespaceNa" +
-      "me\030\001 \002(\t\"L\n\'ListTableDescriptorsByNamesp" +
-      "aceResponse\022!\n\013tableSchema\030\001 \003(\0132\014.Table" +
-      "Schema\"9\n ListTableNamesByNamespaceReque" +
-      "st\022\025\n\rnamespaceName\030\001 \002(\t\"B\n!ListTableNa" +
-      "mesByNamespaceResponse\022\035\n\ttableName\030\001 \003(" +
-      "\0132\n.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shut" +
-      "downResponse\"\023\n\021StopMasterRequest\"\024\n\022Sto",
-      "pMasterResponse\"\020\n\016BalanceRequest\"\'\n\017Bal" +
-      "anceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031Se" +
-      "tBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013s" +
-      "ynchronous\030\002 \001(\010\"8\n\032SetBalancerRunningRe" +
-      "sponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\027\n\025Ru" +
-      "nCatalogScanRequest\"-\n\026RunCatalogScanRes" +
-      "ponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableCata" +
-      "logJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034Ena" +
-      "bleCatalogJanitorResponse\022\022\n\nprev_value\030" +
-      "\001 \001(\010\" \n\036IsCatalogJanitorEnabledRequest\"",
-      "0\n\037IsCatalogJanitorEnabledResponse\022\r\n\005va" +
-      "lue\030\001 \002(\010\"9\n\017SnapshotRequest\022&\n\010snapshot" +
-      "\030\001 \002(\0132\024.SnapshotDescription\",\n\020Snapshot" +
-      "Response\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034Ge" +
-      "tCompletedSnapshotsRequest\"H\n\035GetComplet" +
-      "edSnapshotsResponse\022\'\n\tsnapshots\030\001 \003(\0132\024" +
-      ".SnapshotDescription\"?\n\025DeleteSnapshotRe" +
-      "quest\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescri" +
-      "ption\"\030\n\026DeleteSnapshotResponse\"@\n\026Resto" +
-      "reSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Sn",
-      "apshotDescription\"\031\n\027RestoreSnapshotResp" +
-      "onse\"?\n\025IsSnapshotDoneRequest\022&\n\010snapsho" +
-      "t\030\001 \001(\0132\024.SnapshotDescription\"U\n\026IsSnaps" +
-      "hotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022&\n\010" +
-      "snapshot\030\002 \001(\0132\024.SnapshotDescription\"F\n\034" +
-      "IsRestoreSnapshotDoneRequest\022&\n\010snapshot" +
-      "\030\001 \001(\0132\024.SnapshotDescription\"4\n\035IsRestor" +
-      "eSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" +
-      "se\"=\n\033GetSchemaAlterStatusRequest\022\036\n\ntab" +
-      "le_name\030\001 \002(\0132\n.TableName\"T\n\034GetSchemaAl",
-      "terStatusResponse\022\035\n\025yet_to_update_regio" +
-      "ns\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\202\001\n\032GetT" +
-      "ableDescriptorsRequest\022\037\n\013table_names\030\001 " +
-      "\003(\0132\n.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022includ" +
-      "e_sys_tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004" +
-      " \001(\t\"A\n\033GetTableDescriptorsResponse\022\"\n\014t" +
-      "able_schema\030\001 \003(\0132\014.TableSchema\"[\n\024GetTa" +
-      "bleNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022includ" +
-      "e_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003" +
-      " \001(\t\"8\n\025GetTableNamesResponse\022\037\n\013table_n",
-      "ames\030\001 \003(\0132\n.TableName\"\031\n\027GetClusterStat" +
-      "usRequest\"B\n\030GetClusterStatusResponse\022&\n" +
-      "\016cluster_status\030\001 \002(\0132\016.ClusterStatus\"\030\n" +
-      "\026IsMasterRunningRequest\"4\n\027IsMasterRunni" +
-      "ngResponse\022\031\n\021is_master_running\030\001 \002(\010\"@\n" +
-      "\024ExecProcedureRequest\022(\n\tprocedure\030\001 \002(\013" +
-      "2\025.ProcedureDescription\"F\n\025ExecProcedure" +
-      "Response\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013re" +
-      "turn_data\030\002 \001(\014\"B\n\026IsProcedureDoneReques" +
-      "t\022(\n\tprocedure\030\001 \001(\0132\025.ProcedureDescript",
-      "ion\"W\n\027IsProcedureDoneResponse\022\023\n\004done\030\001" +
-      " \001(\010:\005false\022\'\n\010snapshot\030\002 \001(\0132\025.Procedur" +
-      "eDescription\"A\n\037MajorCompactionTimestamp" +
-      "Request\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"" +
-      "L\n(MajorCompactionTimestampForRegionRequ" +
-      "est\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"@\n" +
-      " MajorCompactionTimestampResponse\022\034\n\024com" +
-      "paction_timestamp\030\001 \002(\0032\327\031\n\rMasterServic" +
-      "e\022S\n\024GetSchemaAlterStatus\022\034.GetSchemaAlt" +
-      "erStatusRequest\032\035.GetSchemaAlterStatusRe",
-      "sponse\022P\n\023GetTableDescriptors\022\033.GetTable" +
-      "DescriptorsRequest\032\034.GetTableDescriptors" +
-      "Response\022>\n\rGetTableNames\022\025.GetTableName" +
-      "sRequest\032\026.GetTableNamesResponse\022G\n\020GetC" +
-      "lusterStatus\022\030.GetClusterStatusRequest\032\031" +
-      ".GetClusterStatusResponse\022D\n\017IsMasterRun" +
-      "ning\022\027.IsMasterRunningRequest\032\030.IsMaster" +
-      "RunningResponse\0222\n\tAddColumn\022\021.AddColumn" +
-      "Request\032\022.AddColumnResponse\022;\n\014DeleteCol" +
-      "umn\022\024.DeleteColumnRequest\032\025.DeleteColumn",
-      "Response\022;\n\014ModifyColumn\022\024.ModifyColumnR" +
-      "equest\032\025.ModifyColumnResponse\0225\n\nMoveReg" +
-      "ion\022\022.MoveRegionRequest\032\023.MoveRegionResp" +
-      "onse\022Y\n\026DispatchMergingRegions\022\036.Dispatc" +
-      "hMergingRegionsRequest\032\037.DispatchMerging" +
-      "RegionsResponse\022;\n\014AssignRegion\022\024.Assign" +
-      "RegionRequest\032\025.AssignRegionResponse\022A\n\016" +
-      "UnassignRegion\022\026.UnassignRegionRequest\032\027" +
-      ".UnassignRegionResponse\022>\n\rOfflineRegion" +
-      "\022\025.OfflineRegionRequest\032\026.OfflineRegionR",
-      "esponse\0228\n\013DeleteTable\022\023.DeleteTableRequ" +
-      "est\032\024.DeleteTableResponse\022>\n\rtruncateTab" +
-      "le\022\025.TruncateTableRequest\032\026.TruncateTabl" +
-      "eResponse\0228\n\013EnableTable\022\023.EnableTableRe" +
-      "quest\032\024.EnableTableResponse\022;\n\014DisableTa" +
-      "ble\022\024.DisableTableRequest\032\025.DisableTable" +
-      "Response\0228\n\013ModifyTable\022\023.ModifyTableReq" +
-      "uest\032\024.ModifyTableResponse\0228\n\013CreateTabl" +
-      "e\022\023.CreateTableRequest\032\024.CreateTableResp" +
-      "onse\022/\n\010Shutdown\022\020.ShutdownRequest\032\021.Shu",
-      "tdownResponse\0225\n\nStopMaster\022\022.StopMaster" +
-      "Request\032\023.StopMasterResponse\022,\n\007Balance\022" +
-      "\017.BalanceRequest\032\020.BalanceResponse\022M\n\022Se" +
-      "tBalancerRunning\022\032.SetBalancerRunningReq" +
-      "uest\032\033.SetBalancerRunningResponse\022A\n\016Run" +
-      "CatalogScan\022\026.RunCatalogScanRequest\032\027.Ru" +
-      "nCatalogScanResponse\022S\n\024EnableCatalogJan" +
-      "itor\022\034.EnableCatalogJanitorRequest\032\035.Ena" +
-      "bleCatalogJanitorResponse\022\\\n\027IsCatalogJa" +
-      "nitorEnabled\022\037.IsCatalogJanitorEnabledRe",
-      "quest\032 .IsCatalogJanitorEnabledResponse\022" +
-      "L\n\021ExecMasterService\022\032.CoprocessorServic" +
-      "eRequest\032\033.CoprocessorServiceResponse\022/\n" +
-      "\010Snapshot\022\020.SnapshotRequest\032\021.SnapshotRe" +
-      "sponse\022V\n\025GetCompletedSnapshots\022\035.GetCom" +
-      "pletedSnapshotsRequest\032\036.GetCompletedSna" +
-      "pshotsResponse\022A\n\016DeleteSnapshot\022\026.Delet" +
-      "eSnapshotRequest\032\027.DeleteSnapshotRespons" +
-      "e\022A\n\016IsSnapshotDone\022\026.IsSnapshotDoneRequ" +
-      "est\032\027.IsSnapshotDoneResponse\022D\n\017RestoreS",
-      "napshot\022\027.RestoreSnapshotRequest\032\030.Resto" +
-      "reSnapshotResponse\022V\n\025IsRestoreSnapshotD" +
-      "one\022\035.IsRestoreSnapshotDoneRequest\032\036.IsR" +
-      "estoreSnapshotDoneResponse\022>\n\rExecProced" +
-      "ure\022\025.ExecProcedureRequest\032\026.ExecProcedu" +
-      "reResponse\022E\n\024ExecProcedureWithRet\022\025.Exe" +
-      "cProcedureRequest\032\026.ExecProcedureRespons" +
-      "e\022D\n\017IsProcedureDone\022\027.IsProcedureDoneRe" +
-      "quest\032\030.IsProcedureDoneResponse\022D\n\017Modif" +
-      "yNamespace\022\027.ModifyNamespaceRequest\032\030.Mo",
-      "difyNamespaceResponse\022D\n\017CreateNamespace" +
-      "\022\027.CreateNamespaceRequest\032\030.CreateNamesp" +
-      "aceResponse\022D\n\017DeleteNamespace\022\027.DeleteN" +
-      "amespaceRequest\032\030.DeleteNamespaceRespons" +
-      "e\022Y\n\026GetNamespaceDescriptor\022\036.GetNamespa" +
-      "ceDescriptorRequest\032\037.GetNamespaceDescri" +
-      "ptorResponse\022_\n\030ListNamespaceDescriptors" +
-      "\022 .ListNamespaceDescriptorsRequest\032!.Lis" +
-      "tNamespaceDescriptorsResponse\022t\n\037ListTab" +
-      "leDescriptorsByNamespace\022\'.ListTableDesc",
-      "riptorsByNamespaceRequest\032(.ListTableDes" +
-      "criptorsByNamespaceResponse\022b\n\031ListTable" +
-      "NamesByNamespace\022!.ListTableNamesByNames" +
-      "paceRequest\032\".ListTableNamesByNamespaceR" +
-      "esponse\022f\n\037getLastMajorCompactionTimesta" +
-      "mp\022 .MajorCompactionTimestampRequest\032!.M" +
-      "ajorCompactionTimestampResponse\022x\n(getLa" +
-      "stMajorCompactionTimestampForRegion\022).Ma" +
-      "jorCompactionTimestampForRegionRequest\032!" +
-      ".MajorCompactionTimestampResponseBB\n*org",
-      ".apache.hadoop.hbase.protobuf.generatedB" +
-      "\014MasterProtosH\001\210\001\001\240\001\001"
+      "o\032\023ClusterStatus.proto\032\013Quota.proto\"`\n\020A" +
+      "ddColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Ta" +
+      "bleName\022,\n\017column_families\030\002 \002(\0132\023.Colum" +
+      "nFamilySchema\"\023\n\021AddColumnResponse\"J\n\023De" +
+      "leteColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n." +
+      "TableName\022\023\n\013column_name\030\002 \002(\014\"\026\n\024Delete" +
+      "ColumnResponse\"c\n\023ModifyColumnRequest\022\036\n" +
+      "\ntable_name\030\001 \002(\0132\n.TableName\022,\n\017column_" +
+      "families\030\002 \002(\0132\023.ColumnFamilySchema\"\026\n\024M",
+      "odifyColumnResponse\"\\\n\021MoveRegionRequest" +
+      "\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022%\n\020de" +
+      "st_server_name\030\002 \001(\0132\013.ServerName\"\024\n\022Mov" +
+      "eRegionResponse\"\200\001\n\035DispatchMergingRegio" +
+      "nsRequest\022\"\n\010region_a\030\001 \002(\0132\020.RegionSpec" +
+      "ifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpecifie" +
+      "r\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036DispatchMe" +
+      "rgingRegionsResponse\"7\n\023AssignRegionRequ" +
+      "est\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"\026\n" +
+      "\024AssignRegionResponse\"O\n\025UnassignRegionR",
+      "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" +
+      "\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegion" +
+      "Response\"8\n\024OfflineRegionRequest\022 \n\006regi" +
+      "on\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025OfflineReg" +
+      "ionResponse\"L\n\022CreateTableRequest\022\"\n\014tab" +
+      "le_schema\030\001 \002(\0132\014.TableSchema\022\022\n\nsplit_k" +
+      "eys\030\002 \003(\014\"\025\n\023CreateTableResponse\"4\n\022Dele" +
+      "teTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Tab" +
+      "leName\"\025\n\023DeleteTableResponse\"T\n\024Truncat" +
+      "eTableRequest\022\035\n\ttableName\030\001 \002(\0132\n.Table",
+      "Name\022\035\n\016preserveSplits\030\002 \001(\010:\005false\"\027\n\025T" +
+      "runcateTableResponse\"4\n\022EnableTableReque" +
+      "st\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\025\n\023En" +
+      "ableTableResponse\"5\n\023DisableTableRequest" +
+      "\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\026\n\024Disa" +
+      "bleTableResponse\"X\n\022ModifyTableRequest\022\036" +
+      "\n\ntable_name\030\001 \002(\0132\n.TableName\022\"\n\014table_" +
+      "schema\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTabl" +
+      "eResponse\"K\n\026CreateNamespaceRequest\0221\n\023n" +
+      "amespaceDescriptor\030\001 \002(\0132\024.NamespaceDesc",
+      "riptor\"\031\n\027CreateNamespaceResponse\"/\n\026Del" +
+      "eteNamespaceRequest\022\025\n\rnamespaceName\030\001 \002" +
+      "(\t\"\031\n\027DeleteNamespaceResponse\"K\n\026ModifyN" +
+      "amespaceRequest\0221\n\023namespaceDescriptor\030\001" +
+      " \002(\0132\024.NamespaceDescriptor\"\031\n\027ModifyName" +
+      "spaceResponse\"6\n\035GetNamespaceDescriptorR" +
+      "equest\022\025\n\rnamespaceName\030\001 \002(\t\"S\n\036GetName" +
+      "spaceDescriptorResponse\0221\n\023namespaceDesc" +
+      "riptor\030\001 \002(\0132\024.NamespaceDescriptor\"!\n\037Li" +
+      "stNamespaceDescriptorsRequest\"U\n ListNam",
+      "espaceDescriptorsResponse\0221\n\023namespaceDe" +
+      "scriptor\030\001 \003(\0132\024.NamespaceDescriptor\"?\n&" +
+      "ListTableDescriptorsByNamespaceRequest\022\025" +
+      "\n\rnamespaceName\030\001 \002(\t\"L\n\'ListTableDescri" +
+      "ptorsByNamespaceResponse\022!\n\013tableSchema\030" +
+      "\001 \003(\0132\014.TableSchema\"9\n ListTableNamesByN" +
+      "amespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"B" +
+      "\n!ListTableNamesByNamespaceResponse\022\035\n\tt" +
+      "ableName\030\001 \003(\0132\n.TableName\"\021\n\017ShutdownRe" +
+      "quest\"\022\n\020ShutdownResponse\"\023\n\021StopMasterR",
+      "equest\"\024\n\022StopMasterResponse\"\020\n\016BalanceR" +
+      "equest\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" +
+      "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" +
+      "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" +
+      "ncerRunningResponse\022\032\n\022prev_balance_valu" +
+      "e\030\001 \001(\010\"\027\n\025RunCatalogScanRequest\"-\n\026RunC" +
+      "atalogScanResponse\022\023\n\013scan_result\030\001 \001(\005\"" +
+      "-\n\033EnableCatalogJanitorRequest\022\016\n\006enable" +
+      "\030\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022\022" +
+      "\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorEn",
+      "abledRequest\"0\n\037IsCatalogJanitorEnabledR" +
+      "esponse\022\r\n\005value\030\001 \002(\010\"9\n\017SnapshotReques" +
+      "t\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescriptio" +
+      "n\",\n\020SnapshotResponse\022\030\n\020expected_timeou" +
+      "t\030\001 \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"" +
+      "H\n\035GetCompletedSnapshotsResponse\022\'\n\tsnap" +
+      "shots\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Del" +
+      "eteSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.S" +
+      "napshotDescription\"\030\n\026DeleteSnapshotResp" +
+      "onse\"@\n\026RestoreSnapshotRequest\022&\n\010snapsh",
+      "ot\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027Restor" +
+      "eSnapshotResponse\"?\n\025IsSnapshotDoneReque" +
+      "st\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescripti" +
+      "on\"U\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001" +
+      "(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDe" +
+      "scription\"F\n\034IsRestoreSnapshotDoneReques" +
+      "t\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescriptio" +
+      "n\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n\004do" +
+      "ne\030\001 \001(\010:\005false\"=\n\033GetSchemaAlterStatusR" +
+      "equest\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"T",
+      "\n\034GetSchemaAlterStatusResponse\022\035\n\025yet_to" +
+      "_update_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002" +
+      " \001(\r\"\202\001\n\032GetTableDescriptorsRequest\022\037\n\013t" +
+      "able_names\030\001 \003(\0132\n.TableName\022\r\n\005regex\030\002 " +
+      "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" +
+      "\n\tnamespace\030\004 \001(\t\"A\n\033GetTableDescriptors" +
+      "Response\022\"\n\014table_schema\030\001 \003(\0132\014.TableSc" +
+      "hema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 " +
+      "\001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021" +
+      "\n\tnamespace\030\003 \001(\t\"8\n\025GetTableNamesRespon",
+      "se\022\037\n\013table_names\030\001 \003(\0132\n.TableName\"\031\n\027G" +
+      "etClusterStatusRequest\"B\n\030GetClusterStat" +
+      "usResponse\022&\n\016cluster_status\030\001 \002(\0132\016.Clu" +
+      "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" +
+      "IsMasterRunningResponse\022\031\n\021is_master_run" +
+      "ning\030\001 \002(\010\"@\n\024ExecProcedureRequest\022(\n\tpr" +
+      "ocedure\030\001 \002(\0132\025.ProcedureDescription\"F\n\025" +
+      "ExecProcedureResponse\022\030\n\020expected_timeou" +
+      "t\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsProced" +
+      "ureDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025.Proc",
+      "edureDescription\"W\n\027IsProcedureDoneRespo" +
+      "nse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapshot\030\002 \001" +
+      "(\0132\025.ProcedureDescription\"\273\001\n\017SetQuotaRe" +
+      "quest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002" +
+      " \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 " +
+      "\001(\0132\n.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" +
+      "ypass_globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020." +
+      "ThrottleRequest\"\022\n\020SetQuotaResponse\"A\n\037M" +
+      "ajorCompactionTimestampRequest\022\036\n\ntable_" +
+      "name\030\001 \002(\0132\n.TableName\"L\n(MajorCompactio",
+      "nTimestampForRegionRequest\022 \n\006region\030\001 \002" +
+      "(\0132\020.RegionSpecifier\"@\n MajorCompactionT" +
+      "imestampResponse\022\034\n\024compaction_timestamp" +
+      "\030\001 \002(\0032\210\032\n\rMasterService\022S\n\024GetSchemaAlt" +
+      "erStatus\022\034.GetSchemaAlterStatusRequest\032\035" +
+      ".GetSchemaAlterStatusResponse\022P\n\023GetTabl" +
+      "eDescriptors\022\033.GetTableDescriptorsReques" +
+      "t\032\034.GetTableDescriptorsResponse\022>\n\rGetTa" +
+      "bleNames\022\025.GetTableNamesRequest\032\026.GetTab" +
+      "leNamesResponse\022G\n\020GetClusterStatus\022\030.Ge",
+      "tClusterStatusRequest\032\031.GetClusterStatus" +
+      "Response\022D\n\017IsMasterRunning\022\027.IsMasterRu" +
+      "nningRequest\032\030.IsMasterRunningResponse\0222" +
+      "\n\tAddColumn\022\021.AddColumnRequest\032\022.AddColu" +
+      "mnResponse\022;\n\014DeleteColumn\022\024.DeleteColum" +
+      "nRequest\032\025.DeleteColumnResponse\022;\n\014Modif" +
+      "yColumn\022\024.ModifyColumnRequest\032\025.ModifyCo" +
+      "lumnResponse\0225\n\nMoveRegion\022\022.MoveRegionR" +
+      "equest\032\023.MoveRegionResponse\022Y\n\026DispatchM" +
+      "ergingRegions\022\036.DispatchMergingRegionsRe",
+      "quest\032\037.DispatchMergingRegionsResponse\022;" +
+      "\n\014AssignRegion\022\024.AssignRegionRequest\032\025.A" +
+      "ssignRegionResponse\022A\n\016UnassignRegion\022\026." +
+      "UnassignRegionRequest\032\027.UnassignRegionRe" +
+      "sponse\022>\n\rOfflineRegion\022\025.OfflineRegionR" +
+      "equest\032\026.OfflineRegionResponse\0228\n\013Delete" +
+      "Table\022\023.DeleteTableRequest\032\024.DeleteTable" +
+      "Response\022>\n\rtruncateTable\022\025.TruncateTabl" +
+      "eRequest\032\026.TruncateTableResponse\0228\n\013Enab" +
+      "leTable\022\023.EnableTableRequest\032\024.EnableTab",
+      "leResponse\022;\n\014DisableTable\022\024.DisableTabl" +
+      "eRequest\032\025.DisableTableResponse\0228\n\013Modif" +
+      "yTable\022\023.ModifyTableRequest\032\024.ModifyTabl" +
+      "eResponse\0228\n\013CreateTable\022\023.CreateTableRe" +
+      "quest\032\024.CreateTableResponse\022/\n\010Shutdown\022" +
+      "\020.ShutdownRequest\032\021.ShutdownResponse\0225\n\n" +
+      "StopMaster\022\022.StopMasterRequest\032\023.StopMas" +
+      "terResponse\022,\n\007Balance\022\017.BalanceRequest\032" +
+      "\020.BalanceResponse\022M\n\022SetBalancerRunning\022" +
+      "\032.SetBalancerRunningRequest\032\033.SetBalance",
+      "rRunningResponse\022A\n\016RunCatalogScan\022\026.Run" +
+      "CatalogScanRequest\032\027.RunCatalogScanRespo" +
+      "nse\022S\n\024EnableCatalogJanitor\022\034.EnableCata" +
+      "logJanitorRequest\032\035.EnableCatalogJanitor" +
+      "Response\022\\\n\027IsCatalogJanitorEnabled\022\037.Is" +
+      "CatalogJanitorEnabledRequest\032 .IsCatalog" +
+      "JanitorEnabledResponse\022L\n\021ExecMasterServ" +
+      "ice\022\032.CoprocessorServiceRequest\032\033.Coproc" +
+      "essorServiceResponse\022/\n\010Snapshot\022\020.Snaps" +
+      "hotRequest\032\021.SnapshotResponse\022V\n\025GetComp",
+      "letedSnapshots\022\035.GetCompletedSnapshotsRe" +
+      "quest\032\036.GetCompletedSnapshotsResponse\022A\n" +
+      "\016DeleteSnapshot\022\026.DeleteSnapshotRequest\032" +
+      "\027.DeleteSnapshotResponse\022A\n\016IsSnapshotDo" +
+      "ne\022\026.IsSnapshotDoneRequest\032\027.IsSnapshotD" +
+      "oneResponse\022D\n\017RestoreSnapshot\022\027.Restore" +
+      "SnapshotRequest\032\030.RestoreSnapshotRespons" +
+      "e\022V\n\025IsRestoreSnapshotDone\022\035.IsRestoreSn" +
+      "apshotDoneRequest\032\036.IsRestoreSnapshotDon" +
+      "eResponse\022>\n\rExecProcedure\022\025.ExecProcedu",
+      "reRequest\032\026.ExecProcedureResponse\022E\n\024Exe" +
+      "cProcedureWithRet\022\025.ExecProcedureRequest" +
+      "\032\026.ExecProcedureResponse\022D\n\017IsProcedureD" +
+      "one\022\027.IsProcedureDoneRequest\032\030.IsProcedu" +
+      "reDoneResponse\022D\n\017ModifyNamespace\022\027.Modi" +
+      "fyNamespaceRequest\032\030.ModifyNamespaceResp" +
+      "onse\022D\n\017CreateNamespace\022\027.CreateNamespac" +
+      "eRequest\032\030.CreateNamespaceResponse\022D\n\017De" +
+      "leteNamespace\022\027.DeleteNamespaceRequest\032\030" +
+      ".DeleteNamespaceResponse\022Y\n\026GetNamespace",
+      "Descriptor\022\036.GetNamespaceDescriptorReque" +
+      "st\032\037.GetNamespaceDescriptorResponse\022_\n\030L" +
+      "istNamespaceDescriptors\022 .ListNamespaceD" +
+      "escriptorsRequest\032!.ListNamespaceDescrip" +
+      "torsResponse\022t\n\037ListTableDescriptorsByNa" +
+      "mespace\022\'.ListTableDescriptorsByNamespac" +
+      "eRequest\032(.ListTableDescriptorsByNamespa" +
+      "ceResponse\022b\n\031ListTableNamesByNamespace\022" +
+      "!.ListTableNamesByNamespaceRequest\032\".Lis" +
+      "tTableNamesByNamespaceResponse\022/\n\010SetQuo",
+      "ta\022\020.SetQuotaRequest\032\021.SetQuotaResponse\022" +
+      "f\n\037getLastMajorCompactionTimestamp\022 .Maj" +
+      "orCompactionTimestampRequest\032!.MajorComp" +
+      "actionTimestampResponse\022x\n(getLastMajorC" +
+      "ompactionTimestampForRegion\022).MajorCompa" +
+      "ctionTimestampForRegionRequest\032!.MajorCo" +
+      "mpactionTimestampResponseBB\n*org.apache." +
+      "hadoop.hbase.protobuf.generatedB\014MasterP" +
+      "rotosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -47941,20 +49820,32 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_IsProcedureDoneResponse_descriptor,
               new java.lang.String[] { "Done", "Snapshot", });
-          internal_static_MajorCompactionTimestampRequest_descriptor =
+          internal_static_SetQuotaRequest_descriptor =
             getDescriptor().getMessageTypes().get(82);
+          internal_static_SetQuotaRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_SetQuotaRequest_descriptor,
+              new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
+          internal_static_SetQuotaResponse_descriptor =
+            getDescriptor().getMessageTypes().get(83);
+          internal_static_SetQuotaResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_SetQuotaResponse_descriptor,
+              new java.lang.String[] { });
+          internal_static_MajorCompactionTimestampRequest_descriptor =
+            getDescriptor().getMessageTypes().get(84);
           internal_static_MajorCompactionTimestampRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_MajorCompactionTimestampRequest_descriptor,
               new java.lang.String[] { "TableName", });
           internal_static_MajorCompactionTimestampForRegionRequest_descriptor =
-            getDescriptor().getMessageTypes().get(83);
+            getDescriptor().getMessageTypes().get(85);
           internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_MajorCompactionTimestampForRegionRequest_descriptor,
               new java.lang.String[] { "Region", });
           internal_static_MajorCompactionTimestampResponse_descriptor =
-            getDescriptor().getMessageTypes().get(84);
+            getDescriptor().getMessageTypes().get(86);
           internal_static_MajorCompactionTimestampResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_MajorCompactionTimestampResponse_descriptor,
@@ -47968,6 +49859,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor(),
         }, assigner);
   }
 
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
new file mode 100644
index 0000000..5eac192
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -0,0 +1,4378 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Quota.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class QuotaProtos {
+  private QuotaProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  /**
+   * Protobuf enum {@code QuotaScope}
+   */
+  public enum QuotaScope
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * CLUSTER = 1;
+     */
+    CLUSTER(0, 1),
+    /**
+     * MACHINE = 2;
+     */
+    MACHINE(1, 2),
+    ;
+
+    /**
+     * CLUSTER = 1;
+     */
+    public static final int CLUSTER_VALUE = 1;
+    /**
+     * MACHINE = 2;
+     */
+    public static final int MACHINE_VALUE = 2;
+
+
+    public final int getNumber() { return value; }
+
+    public static QuotaScope valueOf(int value) {
+      switch (value) {
+        case 1: return CLUSTER;
+        case 2: return MACHINE;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<QuotaScope>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<QuotaScope>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<QuotaScope>() {
+            public QuotaScope findValueByNumber(int number) {
+              return QuotaScope.valueOf(number);
+            }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(0);
+    }
+
+    private static final QuotaScope[] VALUES = values();
+
+    public static QuotaScope valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int index;
+    private final int value;
+
+    private QuotaScope(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:QuotaScope)
+  }
+
+  /**
+   * Protobuf enum {@code ThrottleType}
+   */
+  public enum ThrottleType
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * REQUEST_NUMBER = 1;
+     */
+    REQUEST_NUMBER(0, 1),
+    /**
+     * REQUEST_SIZE = 2;
+     */
+    REQUEST_SIZE(1, 2),
+    /**
+     * WRITE_NUMBER = 3;
+     */
+    WRITE_NUMBER(2, 3),
+    /**
+     * WRITE_SIZE = 4;
+     */
+    WRITE_SIZE(3, 4),
+    /**
+     * READ_NUMBER = 5;
+     */
+    READ_NUMBER(4, 5),
+    /**
+     * READ_SIZE = 6;
+     */
+    READ_SIZE(5, 6),
+    ;
+
+    /**
+     * REQUEST_NUMBER = 1;
+     */
+    public static final int REQUEST_NUMBER_VALUE = 1;
+    /**
+     * REQUEST_SIZE = 2;
+     */
+    public static final int REQUEST_SIZE_VALUE = 2;
+    /**
+     * WRITE_NUMBER = 3;
+     */
+    public static final int WRITE_NUMBER_VALUE = 3;
+    /**
+     * WRITE_SIZE = 4;
+     */
+    public static final int WRITE_SIZE_VALUE = 4;
+    /**
+     * READ_NUMBER = 5;
+     */
+    public static final int READ_NUMBER_VALUE = 5;
+    /**
+     * READ_SIZE = 6;
+     */
+    public static final int READ_SIZE_VALUE = 6;
+
+
+    public final int getNumber() { return value; }
+
+    public static ThrottleType valueOf(int value) {
+      switch (value) {
+        case 1: return REQUEST_NUMBER;
+        case 2: return REQUEST_SIZE;
+        case 3: return WRITE_NUMBER;
+        case 4: return WRITE_SIZE;
+        case 5: return READ_NUMBER;
+        case 6: return READ_SIZE;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<ThrottleType>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<ThrottleType>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<ThrottleType>() {
+            public ThrottleType findValueByNumber(int number) {
+              return ThrottleType.valueOf(number);
+            }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(1);
+    }
+
+    private static final ThrottleType[] VALUES = values();
+
+    public static ThrottleType valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int index;
+    private final int value;
+
+    private ThrottleType(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:ThrottleType)
+  }
+
+  /**
+   * Protobuf enum {@code QuotaType}
+   */
+  public enum QuotaType
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * THROTTLE = 1;
+     */
+    THROTTLE(0, 1),
+    ;
+
+    /**
+     * THROTTLE = 1;
+     */
+    public static final int THROTTLE_VALUE = 1;
+
+
+    public final int getNumber() { return value; }
+
+    public static QuotaType valueOf(int value) {
+      switch (value) {
+        case 1: return THROTTLE;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<QuotaType>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<QuotaType>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<QuotaType>() {
+            public QuotaType findValueByNumber(int number) {
+              return QuotaType.valueOf(number);
+            }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(2);
+    }
+
+    private static final QuotaType[] VALUES = values();
+
+    public static QuotaType valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int index;
+    private final int value;
+
+    private QuotaType(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:QuotaType)
+  }
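+
+  // Note on the three enums above: protoc emits each constant as (index, number),
+  // where index is the constant's position in the descriptor and number is the
+  // wire value declared in Quota.proto. A small round-trip sketch:
+  //
+  //   int wire = QuotaScope.CLUSTER.getNumber();    // 1, the declared proto number
+  //   QuotaScope scope = QuotaScope.valueOf(wire);  // maps back to CLUSTER
+  //   QuotaScope bad = QuotaScope.valueOf(99);      // unknown numbers yield null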
+
+  public interface TimedQuotaOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .TimeUnit time_unit = 1;
+    /**
+     * required .TimeUnit time_unit = 1;
+     */
+    boolean hasTimeUnit();
+    /**
+     * required .TimeUnit time_unit = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit getTimeUnit();
+
+    // optional uint64 soft_limit = 2;
+    /**
+     * optional uint64 soft_limit = 2;
+     */
+    boolean hasSoftLimit();
+    /**
+     * optional uint64 soft_limit = 2;
+     */
+    long getSoftLimit();
+
+    // optional float share = 3;
+    /**
+     * optional float share = 3;
+     */
+    boolean hasShare();
+    /**
+     * optional float share = 3;
+     */
+    float getShare();
+
+    // optional .QuotaScope scope = 4 [default = MACHINE];
+    /**
+     * optional .QuotaScope scope = 4 [default = MACHINE];
+     */
+    boolean hasScope();
+    /**
+     * optional .QuotaScope scope = 4 [default = MACHINE];
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope getScope();
+  }
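+
+  // For reference, the interface above mirrors the TimedQuota message in the
+  // Quota.proto file added by this patch (a sketch reconstructed from the field
+  // comments above):
+  //
+  //   message TimedQuota {
+  //     required TimeUnit time_unit = 1;
+  //     optional uint64 soft_limit = 2;
+  //     optional float share = 3;
+  //     optional QuotaScope scope = 4 [default = MACHINE];
+  //   }
+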
+  /**
+   * Protobuf type {@code TimedQuota}
+   */
+  public static final class TimedQuota extends
+      com.google.protobuf.GeneratedMessage
+      implements TimedQuotaOrBuilder {
+    // Use TimedQuota.newBuilder() to construct.
+    private TimedQuota(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TimedQuota(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TimedQuota defaultInstance;
+    public static TimedQuota getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TimedQuota getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TimedQuota(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                timeUnit_ = value;
+              }
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              softLimit_ = input.readUInt64();
+              break;
+            }
+            case 29: {
+              bitField0_ |= 0x00000004;
+              share_ = input.readFloat();
+              break;
+            }
+            case 32: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope value = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(4, rawValue);
+              } else {
+                bitField0_ |= 0x00000008;
+                scope_ = value;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
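+    // Reading the case labels above: a protobuf tag is (field_number << 3) | wire_type,
+    // so 8 = field 1 as varint (time_unit), 16 = field 2 as varint (soft_limit),
+    // 29 = field 3 as 32-bit fixed (share), and 32 = field 4 as varint (scope).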
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_TimedQuota_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_TimedQuota_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TimedQuota> PARSER =
+        new com.google.protobuf.AbstractParser<TimedQuota>() {
+      public TimedQuota parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TimedQuota(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TimedQuota> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .TimeUnit time_unit = 1;
+    public static final int TIME_UNIT_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit timeUnit_;
+    /**
+     * required .TimeUnit time_unit = 1;
+     */
+    public boolean hasTimeUnit() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * required .TimeUnit time_unit = 1;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit getTimeUnit() {
+      return timeUnit_;
+    }
+
+    // optional uint64 soft_limit = 2;
+    public static final int SOFT_LIMIT_FIELD_NUMBER = 2;
+    private long softLimit_;
+    /**
+     * optional uint64 soft_limit = 2;
+     */
+    public boolean hasSoftLimit() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * optional uint64 soft_limit = 2;
+     */
+    public long getSoftLimit() {
+      return softLimit_;
+    }
+
+    // optional float share = 3;
+    public static final int SHARE_FIELD_NUMBER = 3;
+    private float share_;
+    /**
+     * optional float share = 3;
+     */
+    public boolean hasShare() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * optional float share = 3;
+     */
+    public float getShare() {
+      return share_;
+    }
+
+    // optional .QuotaScope scope = 4 [default = MACHINE];
+    public static final int SCOPE_FIELD_NUMBER = 4;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope scope_;
+    /**
+     * optional .QuotaScope scope = 4 [default = MACHINE];
+     */
+    public boolean hasScope() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * optional .QuotaScope scope = 4 [default = MACHINE];
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope getScope() {
+      return scope_;
+    }
+
+    private void initFields() {
+      timeUnit_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit.NANOSECONDS;
+      softLimit_ = 0L;
+      share_ = 0F;
+      scope_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope.MACHINE;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasTimeUnit()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, timeUnit_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, softLimit_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeFloat(3, share_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeEnum(4, scope_.getNumber());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, timeUnit_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, softLimit_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeFloatSize(3, share_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(4, scope_.getNumber());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota) obj;
+
+      boolean result = true;
+      result = result && (hasTimeUnit() == other.hasTimeUnit());
+      if (hasTimeUnit()) {
+        result = result &&
+            (getTimeUnit() == other.getTimeUnit());
+      }
+      result = result && (hasSoftLimit() == other.hasSoftLimit());
+      if (hasSoftLimit()) {
+        result = result && (getSoftLimit()
+            == other.getSoftLimit());
+      }
+      result = result && (hasShare() == other.hasShare());
+      if (hasShare()) {
+        result = result && (Float.floatToIntBits(getShare())    == Float.floatToIntBits(other.getShare()));
+      }
+      result = result && (hasScope() == other.hasScope());
+      if (hasScope()) {
+        result = result &&
+            (getScope() == other.getScope());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasTimeUnit()) {
+        hash = (37 * hash) + TIME_UNIT_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getTimeUnit());
+      }
+      if (hasSoftLimit()) {
+        hash = (37 * hash) + SOFT_LIMIT_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getSoftLimit());
+      }
+      if (hasShare()) {
+        hash = (37 * hash) + SHARE_FIELD_NUMBER;
+        hash = (53 * hash) + Float.floatToIntBits(
+            getShare());
+      }
+      if (hasScope()) {
+        hash = (37 * hash) + SCOPE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getScope());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code TimedQuota}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_TimedQuota_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_TimedQuota_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        timeUnit_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit.NANOSECONDS;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        softLimit_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        share_ = 0F;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        scope_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope.MACHINE;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_TimedQuota_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota build() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.timeUnit_ = timeUnit_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.softLimit_ = softLimit_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.share_ = share_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.scope_ = scope_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) return this;
+        if (other.hasTimeUnit()) {
+          setTimeUnit(other.getTimeUnit());
+        }
+        if (other.hasSoftLimit()) {
+          setSoftLimit(other.getSoftLimit());
+        }
+        if (other.hasShare()) {
+          setShare(other.getShare());
+        }
+        if (other.hasScope()) {
+          setScope(other.getScope());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasTimeUnit()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .TimeUnit time_unit = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit timeUnit_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit.NANOSECONDS;
+      /**
+       * required .TimeUnit time_unit = 1;
+       */
+      public boolean hasTimeUnit() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * required .TimeUnit time_unit = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit getTimeUnit() {
+        return timeUnit_;
+      }
+      /**
+       * required .TimeUnit time_unit = 1;
+       */
+      public Builder setTimeUnit(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        timeUnit_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required .TimeUnit time_unit = 1;
+       */
+      public Builder clearTimeUnit() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        timeUnit_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit.NANOSECONDS;
+        onChanged();
+        return this;
+      }
+
+      // optional uint64 soft_limit = 2;
+      private long softLimit_ ;
+      /**
+       * optional uint64 soft_limit = 2;
+       */
+      public boolean hasSoftLimit() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * optional uint64 soft_limit = 2;
+       */
+      public long getSoftLimit() {
+        return softLimit_;
+      }
+      /**
+       * optional uint64 soft_limit = 2;
+       */
+      public Builder setSoftLimit(long value) {
+        bitField0_ |= 0x00000002;
+        softLimit_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional uint64 soft_limit = 2;
+       */
+      public Builder clearSoftLimit() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        softLimit_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // optional float share = 3;
+      private float share_ ;
+      /**
+       * optional float share = 3;
+       */
+      public boolean hasShare() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * optional float share = 3;
+       */
+      public float getShare() {
+        return share_;
+      }
+      /**
+       * optional float share = 3;
+       */
+      public Builder setShare(float value) {
+        bitField0_ |= 0x00000004;
+        share_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional float share = 3;
+       */
+      public Builder clearShare() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        share_ = 0F;
+        onChanged();
+        return this;
+      }
+
+      // optional .QuotaScope scope = 4 [default = MACHINE];
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope scope_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope.MACHINE;
+      /**
+       * optional .QuotaScope scope = 4 [default = MACHINE];
+       */
+      public boolean hasScope() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * optional .QuotaScope scope = 4 [default = MACHINE];
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope getScope() {
+        return scope_;
+      }
+      /**
+       * optional .QuotaScope scope = 4 [default = MACHINE];
+       */
+      public Builder setScope(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000008;
+        scope_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional .QuotaScope scope = 4 [default = MACHINE];
+       */
+      public Builder clearScope() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        scope_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope.MACHINE;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:TimedQuota)
+    }
+
+    static {
+      defaultInstance = new TimedQuota(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:TimedQuota)
+  }
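+
+  // A minimal usage sketch for the generated builder API (illustrative only;
+  // TimeUnit.SECONDS is assumed to be among the TimeUnit values this patch adds
+  // to HBase.proto):
+  //
+  //   QuotaProtos.TimedQuota tq = QuotaProtos.TimedQuota.newBuilder()
+  //       .setTimeUnit(HBaseProtos.TimeUnit.SECONDS) // required; build() fails without it
+  //       .setSoftLimit(10L)                         // e.g. 10 requests per second
+  //       .build();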
+
+  public interface ThrottleOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional .TimedQuota req_num = 1;
+    /**
+     * optional .TimedQuota req_num = 1;
+     */
+    boolean hasReqNum();
+    /**
+     * optional .TimedQuota req_num = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReqNum();
+    /**
+     * optional .TimedQuota req_num = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReqNumOrBuilder();
+
+    // optional .TimedQuota req_size = 2;
+    /**
+     * optional .TimedQuota req_size = 2;
+     */
+    boolean hasReqSize();
+    /**
+     * optional .TimedQuota req_size = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReqSize();
+    /**
+     * optional .TimedQuota req_size = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReqSizeOrBuilder();
+
+    // optional .TimedQuota write_num = 3;
+    /**
+     * optional .TimedQuota write_num = 3;
+     */
+    boolean hasWriteNum();
+    /**
+     * optional .TimedQuota write_num = 3;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getWriteNum();
+    /**
+     * optional .TimedQuota write_num = 3;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getWriteNumOrBuilder();
+
+    // optional .TimedQuota write_size = 4;
+    /**
+     * optional .TimedQuota write_size = 4;
+     */
+    boolean hasWriteSize();
+    /**
+     * optional .TimedQuota write_size = 4;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getWriteSize();
+    /**
+     * optional .TimedQuota write_size = 4;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getWriteSizeOrBuilder();
+
+    // optional .TimedQuota read_num = 5;
+    /**
+     * optional .TimedQuota read_num = 5;
+     */
+    boolean hasReadNum();
+    /**
+     * optional .TimedQuota read_num = 5;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReadNum();
+    /**
+     * optional .TimedQuota read_num = 5;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReadNumOrBuilder();
+
+    // optional .TimedQuota read_size = 6;
+    /**
+     * optional .TimedQuota read_size = 6;
+     */
+    boolean hasReadSize();
+    /**
+     * optional .TimedQuota read_size = 6;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReadSize();
+    /**
+     * optional .TimedQuota read_size = 6;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReadSizeOrBuilder();
+  }
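+
+  // Corresponding message in Quota.proto (a sketch reconstructed from the field
+  // comments above; each throttling dimension is an optional TimedQuota):
+  //
+  //   message Throttle {
+  //     optional TimedQuota req_num = 1;
+  //     optional TimedQuota req_size = 2;
+  //     optional TimedQuota write_num = 3;
+  //     optional TimedQuota write_size = 4;
+  //     optional TimedQuota read_num = 5;
+  //     optional TimedQuota read_size = 6;
+  //   }
+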
+  /**
+   * Protobuf type {@code Throttle}
+   */
+  public static final class Throttle extends
+      com.google.protobuf.GeneratedMessage
+      implements ThrottleOrBuilder {
+    // Use Throttle.newBuilder() to construct.
+    private Throttle(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private Throttle(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final Throttle defaultInstance;
+    public static Throttle getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public Throttle getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private Throttle(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = reqNum_.toBuilder();
+              }
+              reqNum_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(reqNum_);
+                reqNum_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = reqSize_.toBuilder();
+              }
+              reqSize_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(reqSize_);
+                reqSize_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+            case 26: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000004) == 0x00000004)) {
+                subBuilder = writeNum_.toBuilder();
+              }
+              writeNum_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(writeNum_);
+                writeNum_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000004;
+              break;
+            }
+            case 34: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000008) == 0x00000008)) {
+                subBuilder = writeSize_.toBuilder();
+              }
+              writeSize_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(writeSize_);
+                writeSize_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000008;
+              break;
+            }
+            case 42: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000010) == 0x00000010)) {
+                subBuilder = readNum_.toBuilder();
+              }
+              readNum_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(readNum_);
+                readNum_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000010;
+              break;
+            }
+            case 50: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000020) == 0x00000020)) {
+                subBuilder = readSize_.toBuilder();
+              }
+              readSize_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(readSize_);
+                readSize_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000020;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
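+    // All six fields above are length-delimited submessages (wire type 2), hence
+    // tags 10, 18, 26, 34, 42 and 50 for fields 1..6. Each case merges into any
+    // previously parsed value via toBuilder()/mergeFrom(), the standard protobuf
+    // behavior when an optional message field appears more than once.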
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Throttle_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Throttle_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<Throttle> PARSER =
+        new com.google.protobuf.AbstractParser<Throttle>() {
+      public Throttle parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new Throttle(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<Throttle> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional .TimedQuota req_num = 1;
+    public static final int REQ_NUM_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota reqNum_;
+    /**
+     * optional .TimedQuota req_num = 1;
+     */
+    public boolean hasReqNum() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * optional .TimedQuota req_num = 1;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReqNum() {
+      return reqNum_;
+    }
+    /**
+     * optional .TimedQuota req_num = 1;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReqNumOrBuilder() {
+      return reqNum_;
+    }
+
+    // optional .TimedQuota req_size = 2;
+    public static final int REQ_SIZE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota reqSize_;
+    /**
+     * optional .TimedQuota req_size = 2;
+     */
+    public boolean hasReqSize() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * optional .TimedQuota req_size = 2;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReqSize() {
+      return reqSize_;
+    }
+    /**
+     * optional .TimedQuota req_size = 2;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReqSizeOrBuilder() {
+      return reqSize_;
+    }
+
+    // optional .TimedQuota write_num = 3;
+    public static final int WRITE_NUM_FIELD_NUMBER = 3;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota writeNum_;
+    /**
+     * optional .TimedQuota write_num = 3;
+     */
+    public boolean hasWriteNum() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * optional .TimedQuota write_num = 3;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getWriteNum() {
+      return writeNum_;
+    }
+    /**
+     * optional .TimedQuota write_num = 3;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getWriteNumOrBuilder() {
+      return writeNum_;
+    }
+
+    // optional .TimedQuota write_size = 4;
+    public static final int WRITE_SIZE_FIELD_NUMBER = 4;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota writeSize_;
+    /**
+     * optional .TimedQuota write_size = 4;
+     */
+    public boolean hasWriteSize() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * optional .TimedQuota write_size = 4;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getWriteSize() {
+      return writeSize_;
+    }
+    /**
+     * optional .TimedQuota write_size = 4;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getWriteSizeOrBuilder() {
+      return writeSize_;
+    }
+
+    // optional .TimedQuota read_num = 5;
+    public static final int READ_NUM_FIELD_NUMBER = 5;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota readNum_;
+    /**
+     * optional .TimedQuota read_num = 5;
+     */
+    public boolean hasReadNum() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * optional .TimedQuota read_num = 5;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReadNum() {
+      return readNum_;
+    }
+    /**
+     * optional .TimedQuota read_num = 5;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReadNumOrBuilder() {
+      return readNum_;
+    }
+
+    // optional .TimedQuota read_size = 6;
+    public static final int READ_SIZE_FIELD_NUMBER = 6;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota readSize_;
+    /**
+     * optional .TimedQuota read_size = 6;
+     */
+    public boolean hasReadSize() {
+      return ((bitField0_ & 0x00000020) == 0x00000020);
+    }
+    /**
+     * optional .TimedQuota read_size = 6;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReadSize() {
+      return readSize_;
+    }
+    /**
+     * optional .TimedQuota read_size = 6;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReadSizeOrBuilder() {
+      return readSize_;
+    }
+
+    private void initFields() {
+      reqNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      reqSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      writeNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      writeSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      readNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      readSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (hasReqNum()) {
+        if (!getReqNum().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      if (hasReqSize()) {
+        if (!getReqSize().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      if (hasWriteNum()) {
+        if (!getWriteNum().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      if (hasWriteSize()) {
+        if (!getWriteSize().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      if (hasReadNum()) {
+        if (!getReadNum().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      if (hasReadSize()) {
+        if (!getReadSize().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, reqNum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, reqSize_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeMessage(3, writeNum_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeMessage(4, writeSize_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeMessage(5, readNum_);
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        output.writeMessage(6, readSize_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, reqNum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, reqSize_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(3, writeNum_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(4, writeSize_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(5, readNum_);
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(6, readSize_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle) obj;
+
+      boolean result = true;
+      result = result && (hasReqNum() == other.hasReqNum());
+      if (hasReqNum()) {
+        result = result && getReqNum()
+            .equals(other.getReqNum());
+      }
+      result = result && (hasReqSize() == other.hasReqSize());
+      if (hasReqSize()) {
+        result = result && getReqSize()
+            .equals(other.getReqSize());
+      }
+      result = result && (hasWriteNum() == other.hasWriteNum());
+      if (hasWriteNum()) {
+        result = result && getWriteNum()
+            .equals(other.getWriteNum());
+      }
+      result = result && (hasWriteSize() == other.hasWriteSize());
+      if (hasWriteSize()) {
+        result = result && getWriteSize()
+            .equals(other.getWriteSize());
+      }
+      result = result && (hasReadNum() == other.hasReadNum());
+      if (hasReadNum()) {
+        result = result && getReadNum()
+            .equals(other.getReadNum());
+      }
+      result = result && (hasReadSize() == other.hasReadSize());
+      if (hasReadSize()) {
+        result = result && getReadSize()
+            .equals(other.getReadSize());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasReqNum()) {
+        hash = (37 * hash) + REQ_NUM_FIELD_NUMBER;
+        hash = (53 * hash) + getReqNum().hashCode();
+      }
+      if (hasReqSize()) {
+        hash = (37 * hash) + REQ_SIZE_FIELD_NUMBER;
+        hash = (53 * hash) + getReqSize().hashCode();
+      }
+      if (hasWriteNum()) {
+        hash = (37 * hash) + WRITE_NUM_FIELD_NUMBER;
+        hash = (53 * hash) + getWriteNum().hashCode();
+      }
+      if (hasWriteSize()) {
+        hash = (37 * hash) + WRITE_SIZE_FIELD_NUMBER;
+        hash = (53 * hash) + getWriteSize().hashCode();
+      }
+      if (hasReadNum()) {
+        hash = (37 * hash) + READ_NUM_FIELD_NUMBER;
+        hash = (53 * hash) + getReadNum().hashCode();
+      }
+      if (hasReadSize()) {
+        hash = (37 * hash) + READ_SIZE_FIELD_NUMBER;
+        hash = (53 * hash) + getReadSize().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code Throttle}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Throttle_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Throttle_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getReqNumFieldBuilder();
+          getReqSizeFieldBuilder();
+          getWriteNumFieldBuilder();
+          getWriteSizeFieldBuilder();
+          getReadNumFieldBuilder();
+          getReadSizeFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (reqNumBuilder_ == null) {
+          reqNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+        } else {
+          reqNumBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (reqSizeBuilder_ == null) {
+          reqSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+        } else {
+          reqSizeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        if (writeNumBuilder_ == null) {
+          writeNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+        } else {
+          writeNumBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
+        if (writeSizeBuilder_ == null) {
+          writeSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+        } else {
+          writeSizeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000008);
+        if (readNumBuilder_ == null) {
+          readNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+        } else {
+          readNumBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000010);
+        if (readSizeBuilder_ == null) {
+          readSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+        } else {
+          readSizeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000020);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Throttle_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle build() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (reqNumBuilder_ == null) {
+          result.reqNum_ = reqNum_;
+        } else {
+          result.reqNum_ = reqNumBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (reqSizeBuilder_ == null) {
+          result.reqSize_ = reqSize_;
+        } else {
+          result.reqSize_ = reqSizeBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        if (writeNumBuilder_ == null) {
+          result.writeNum_ = writeNum_;
+        } else {
+          result.writeNum_ = writeNumBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        if (writeSizeBuilder_ == null) {
+          result.writeSize_ = writeSize_;
+        } else {
+          result.writeSize_ = writeSizeBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        if (readNumBuilder_ == null) {
+          result.readNum_ = readNum_;
+        } else {
+          result.readNum_ = readNumBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+          to_bitField0_ |= 0x00000020;
+        }
+        if (readSizeBuilder_ == null) {
+          result.readSize_ = readSize_;
+        } else {
+          result.readSize_ = readSizeBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
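+
+      // Note that buildPartial() transfers each set-bit from the builder's
+      // bitField0_ to the message and, for every field, uses either the
+      // locally cached TimedQuota or the nested SingleFieldBuilder's build()
+      // result; unlike build(), it skips the isInitialized() check, so it
+      // can emit a Throttle whose nested quotas are still incomplete.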
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance()) return this;
+        if (other.hasReqNum()) {
+          mergeReqNum(other.getReqNum());
+        }
+        if (other.hasReqSize()) {
+          mergeReqSize(other.getReqSize());
+        }
+        if (other.hasWriteNum()) {
+          mergeWriteNum(other.getWriteNum());
+        }
+        if (other.hasWriteSize()) {
+          mergeWriteSize(other.getWriteSize());
+        }
+        if (other.hasReadNum()) {
+          mergeReadNum(other.getReadNum());
+        }
+        if (other.hasReadSize()) {
+          mergeReadSize(other.getReadSize());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (hasReqNum()) {
+          if (!getReqNum().isInitialized()) {
+            
+            return false;
+          }
+        }
+        if (hasReqSize()) {
+          if (!getReqSize().isInitialized()) {
+            
+            return false;
+          }
+        }
+        if (hasWriteNum()) {
+          if (!getWriteNum().isInitialized()) {
+            
+            return false;
+          }
+        }
+        if (hasWriteSize()) {
+          if (!getWriteSize().isInitialized()) {
+            
+            return false;
+          }
+        }
+        if (hasReadNum()) {
+          if (!getReadNum().isInitialized()) {
+            
+            return false;
+          }
+        }
+        if (hasReadSize()) {
+          if (!getReadSize().isInitialized()) {
+            
+            return false;
+          }
+        }
+        return true;
+      }
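+
+      // Unlike the message-level isInitialized() with its memoized byte, the
+      // builder variant re-validates every set TimedQuota on each call: the
+      // builder is mutable, so a cached answer could go stale.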
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional .TimedQuota req_num = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota reqNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> reqNumBuilder_;
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public boolean hasReqNum() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReqNum() {
+        if (reqNumBuilder_ == null) {
+          return reqNum_;
+        } else {
+          return reqNumBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public Builder setReqNum(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (reqNumBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          reqNum_ = value;
+          onChanged();
+        } else {
+          reqNumBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public Builder setReqNum(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder builderForValue) {
+        if (reqNumBuilder_ == null) {
+          reqNum_ = builderForValue.build();
+          onChanged();
+        } else {
+          reqNumBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public Builder mergeReqNum(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (reqNumBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              reqNum_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
+            reqNum_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(reqNum_).mergeFrom(value).buildPartial();
+          } else {
+            reqNum_ = value;
+          }
+          onChanged();
+        } else {
+          reqNumBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public Builder clearReqNum() {
+        if (reqNumBuilder_ == null) {
+          reqNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          reqNumBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder getReqNumBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getReqNumFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReqNumOrBuilder() {
+        if (reqNumBuilder_ != null) {
+          return reqNumBuilder_.getMessageOrBuilder();
+        } else {
+          return reqNum_;
+        }
+      }
+      /**
+       * optional .TimedQuota req_num = 1;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+          getReqNumFieldBuilder() {
+        if (reqNumBuilder_ == null) {
+          reqNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>(
+                  reqNum_,
+                  getParentForChildren(),
+                  isClean());
+          reqNum_ = null;
+        }
+        return reqNumBuilder_;
+      }
+
+      // optional .TimedQuota req_size = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota reqSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> reqSizeBuilder_;
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public boolean hasReqSize() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReqSize() {
+        if (reqSizeBuilder_ == null) {
+          return reqSize_;
+        } else {
+          return reqSizeBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public Builder setReqSize(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (reqSizeBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          reqSize_ = value;
+          onChanged();
+        } else {
+          reqSizeBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public Builder setReqSize(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder builderForValue) {
+        if (reqSizeBuilder_ == null) {
+          reqSize_ = builderForValue.build();
+          onChanged();
+        } else {
+          reqSizeBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public Builder mergeReqSize(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (reqSizeBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              reqSize_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
+            reqSize_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(reqSize_).mergeFrom(value).buildPartial();
+          } else {
+            reqSize_ = value;
+          }
+          onChanged();
+        } else {
+          reqSizeBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public Builder clearReqSize() {
+        if (reqSizeBuilder_ == null) {
+          reqSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          reqSizeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder getReqSizeBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getReqSizeFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReqSizeOrBuilder() {
+        if (reqSizeBuilder_ != null) {
+          return reqSizeBuilder_.getMessageOrBuilder();
+        } else {
+          return reqSize_;
+        }
+      }
+      /**
+       * optional .TimedQuota req_size = 2;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+          getReqSizeFieldBuilder() {
+        if (reqSizeBuilder_ == null) {
+          reqSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>(
+                  reqSize_,
+                  getParentForChildren(),
+                  isClean());
+          reqSize_ = null;
+        }
+        return reqSizeBuilder_;
+      }
+
+      // optional .TimedQuota write_num = 3;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota writeNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> writeNumBuilder_;
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public boolean hasWriteNum() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getWriteNum() {
+        if (writeNumBuilder_ == null) {
+          return writeNum_;
+        } else {
+          return writeNumBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public Builder setWriteNum(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (writeNumBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          writeNum_ = value;
+          onChanged();
+        } else {
+          writeNumBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public Builder setWriteNum(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder builderForValue) {
+        if (writeNumBuilder_ == null) {
+          writeNum_ = builderForValue.build();
+          onChanged();
+        } else {
+          writeNumBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public Builder mergeWriteNum(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (writeNumBuilder_ == null) {
+          if (((bitField0_ & 0x00000004) == 0x00000004) &&
+              writeNum_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
+            writeNum_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(writeNum_).mergeFrom(value).buildPartial();
+          } else {
+            writeNum_ = value;
+          }
+          onChanged();
+        } else {
+          writeNumBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public Builder clearWriteNum() {
+        if (writeNumBuilder_ == null) {
+          writeNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          writeNumBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder getWriteNumBuilder() {
+        bitField0_ |= 0x00000004;
+        onChanged();
+        return getWriteNumFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getWriteNumOrBuilder() {
+        if (writeNumBuilder_ != null) {
+          return writeNumBuilder_.getMessageOrBuilder();
+        } else {
+          return writeNum_;
+        }
+      }
+      /**
+       * optional .TimedQuota write_num = 3;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+          getWriteNumFieldBuilder() {
+        if (writeNumBuilder_ == null) {
+          writeNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>(
+                  writeNum_,
+                  getParentForChildren(),
+                  isClean());
+          writeNum_ = null;
+        }
+        return writeNumBuilder_;
+      }
+
+      // optional .TimedQuota write_size = 4;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota writeSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> writeSizeBuilder_;
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public boolean hasWriteSize() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getWriteSize() {
+        if (writeSizeBuilder_ == null) {
+          return writeSize_;
+        } else {
+          return writeSizeBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public Builder setWriteSize(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (writeSizeBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          writeSize_ = value;
+          onChanged();
+        } else {
+          writeSizeBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public Builder setWriteSize(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder builderForValue) {
+        if (writeSizeBuilder_ == null) {
+          writeSize_ = builderForValue.build();
+          onChanged();
+        } else {
+          writeSizeBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public Builder mergeWriteSize(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (writeSizeBuilder_ == null) {
+          if (((bitField0_ & 0x00000008) == 0x00000008) &&
+              writeSize_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
+            writeSize_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(writeSize_).mergeFrom(value).buildPartial();
+          } else {
+            writeSize_ = value;
+          }
+          onChanged();
+        } else {
+          writeSizeBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public Builder clearWriteSize() {
+        if (writeSizeBuilder_ == null) {
+          writeSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          writeSizeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000008);
+        return this;
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder getWriteSizeBuilder() {
+        bitField0_ |= 0x00000008;
+        onChanged();
+        return getWriteSizeFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getWriteSizeOrBuilder() {
+        if (writeSizeBuilder_ != null) {
+          return writeSizeBuilder_.getMessageOrBuilder();
+        } else {
+          return writeSize_;
+        }
+      }
+      /**
+       * optional .TimedQuota write_size = 4;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+          getWriteSizeFieldBuilder() {
+        if (writeSizeBuilder_ == null) {
+          writeSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>(
+                  writeSize_,
+                  getParentForChildren(),
+                  isClean());
+          writeSize_ = null;
+        }
+        return writeSizeBuilder_;
+      }
+
+      // optional .TimedQuota read_num = 5;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota readNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> readNumBuilder_;
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public boolean hasReadNum() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReadNum() {
+        if (readNumBuilder_ == null) {
+          return readNum_;
+        } else {
+          return readNumBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public Builder setReadNum(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (readNumBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          readNum_ = value;
+          onChanged();
+        } else {
+          readNumBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000010;
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public Builder setReadNum(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder builderForValue) {
+        if (readNumBuilder_ == null) {
+          readNum_ = builderForValue.build();
+          onChanged();
+        } else {
+          readNumBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000010;
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public Builder mergeReadNum(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (readNumBuilder_ == null) {
+          if (((bitField0_ & 0x00000010) == 0x00000010) &&
+              readNum_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
+            readNum_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(readNum_).mergeFrom(value).buildPartial();
+          } else {
+            readNum_ = value;
+          }
+          onChanged();
+        } else {
+          readNumBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000010;
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public Builder clearReadNum() {
+        if (readNumBuilder_ == null) {
+          readNum_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          readNumBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000010);
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder getReadNumBuilder() {
+        bitField0_ |= 0x00000010;
+        onChanged();
+        return getReadNumFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReadNumOrBuilder() {
+        if (readNumBuilder_ != null) {
+          return readNumBuilder_.getMessageOrBuilder();
+        } else {
+          return readNum_;
+        }
+      }
+      /**
+       * optional .TimedQuota read_num = 5;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+          getReadNumFieldBuilder() {
+        if (readNumBuilder_ == null) {
+          readNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>(
+                  readNum_,
+                  getParentForChildren(),
+                  isClean());
+          readNum_ = null;
+        }
+        return readNumBuilder_;
+      }
+
+      // optional .TimedQuota read_size = 6;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota readSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> readSizeBuilder_;
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public boolean hasReadSize() {
+        return ((bitField0_ & 0x00000020) == 0x00000020);
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getReadSize() {
+        if (readSizeBuilder_ == null) {
+          return readSize_;
+        } else {
+          return readSizeBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public Builder setReadSize(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (readSizeBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          readSize_ = value;
+          onChanged();
+        } else {
+          readSizeBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000020;
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public Builder setReadSize(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder builderForValue) {
+        if (readSizeBuilder_ == null) {
+          readSize_ = builderForValue.build();
+          onChanged();
+        } else {
+          readSizeBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000020;
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public Builder mergeReadSize(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (readSizeBuilder_ == null) {
+          if (((bitField0_ & 0x00000020) == 0x00000020) &&
+              readSize_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
+            readSize_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(readSize_).mergeFrom(value).buildPartial();
+          } else {
+            readSize_ = value;
+          }
+          onChanged();
+        } else {
+          readSizeBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000020;
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public Builder clearReadSize() {
+        if (readSizeBuilder_ == null) {
+          readSize_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          readSizeBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000020);
+        return this;
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder getReadSizeBuilder() {
+        bitField0_ |= 0x00000020;
+        onChanged();
+        return getReadSizeFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getReadSizeOrBuilder() {
+        if (readSizeBuilder_ != null) {
+          return readSizeBuilder_.getMessageOrBuilder();
+        } else {
+          return readSize_;
+        }
+      }
+      /**
+       * optional .TimedQuota read_size = 6;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+          getReadSizeFieldBuilder() {
+        if (readSizeBuilder_ == null) {
+          readSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>(
+                  readSize_,
+                  getParentForChildren(),
+                  isClean());
+          readSize_ = null;
+        }
+        return readSizeBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:Throttle)
+    }
+
+    static {
+      defaultInstance = new Throttle(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:Throttle)
+  }
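+
+  // A minimal usage sketch (illustrative, not protoc output; 'perMachineRate'
+  // stands for some hypothetical, fully initialized TimedQuota):
+  //
+  //   QuotaProtos.Throttle throttle = QuotaProtos.Throttle.newBuilder()
+  //       .setReqNum(perMachineRate)    // cap on request count
+  //       .setReqSize(perMachineRate)   // cap on request bytes
+  //       .build();  // build() throws if a set TimedQuota is uninitialized
+  //
+  // All six fields are optional; leaving one unset simply imposes no limit
+  // of that kind.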
+
+  public interface ThrottleRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional .ThrottleType type = 1;
+    /**
+     * optional .ThrottleType type = 1;
+     */
+    boolean hasType();
+    /**
+     * optional .ThrottleType type = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType getType();
+
+    // optional .TimedQuota timed_quota = 2;
+    /**
+     * optional .TimedQuota timed_quota = 2;
+     */
+    boolean hasTimedQuota();
+    /**
+     * optional .TimedQuota timed_quota = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getTimedQuota();
+    /**
+     * optional .TimedQuota timed_quota = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getTimedQuotaOrBuilder();
+  }
+  /**
+   * Protobuf type {@code ThrottleRequest}
+   */
+  public static final class ThrottleRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements ThrottleRequestOrBuilder {
+    // Use ThrottleRequest.newBuilder() to construct.
+    private ThrottleRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private ThrottleRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final ThrottleRequest defaultInstance;
+    public static ThrottleRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public ThrottleRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ThrottleRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType value = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                type_ = value;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = timedQuota_.toBuilder();
+              }
+              timedQuota_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(timedQuota_);
+                timedQuota_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
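+    // In the tag switch above each case value is a protobuf wire tag,
+    // (field_number << 3) | wire_type: 8 is field 1 as a varint (the
+    // ThrottleType enum) and 18 is field 2 as a length-delimited message
+    // (the TimedQuota). Unrecognized enum numbers and unknown tags are
+    // preserved in unknownFields instead of being dropped.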
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_ThrottleRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_ThrottleRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<ThrottleRequest> PARSER =
+        new com.google.protobuf.AbstractParser<ThrottleRequest>() {
+      public ThrottleRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new ThrottleRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<ThrottleRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional .ThrottleType type = 1;
+    public static final int TYPE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType type_;
+    /**
+     * optional .ThrottleType type = 1;
+     */
+    public boolean hasType() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * optional .ThrottleType type = 1;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType getType() {
+      return type_;
+    }
+
+    // optional .TimedQuota timed_quota = 2;
+    public static final int TIMED_QUOTA_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota timedQuota_;
+    /**
+     * optional .TimedQuota timed_quota = 2;
+     */
+    public boolean hasTimedQuota() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * optional .TimedQuota timed_quota = 2;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getTimedQuota() {
+      return timedQuota_;
+    }
+    /**
+     * optional .TimedQuota timed_quota = 2;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getTimedQuotaOrBuilder() {
+      return timedQuota_;
+    }
+
+    private void initFields() {
+      type_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType.REQUEST_NUMBER;
+      timedQuota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (hasTimedQuota()) {
+        if (!getTimedQuota().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, type_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, timedQuota_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, type_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, timedQuota_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest) obj;
+
+      boolean result = true;
+      result = result && (hasType() == other.hasType());
+      if (hasType()) {
+        result = result &&
+            (getType() == other.getType());
+      }
+      result = result && (hasTimedQuota() == other.hasTimedQuota());
+      if (hasTimedQuota()) {
+        result = result && getTimedQuota()
+            .equals(other.getTimedQuota());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasType()) {
+        hash = (37 * hash) + TYPE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getType());
+      }
+      if (hasTimedQuota()) {
+        hash = (37 * hash) + TIMED_QUOTA_FIELD_NUMBER;
+        hash = (53 * hash) + getTimedQuota().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code ThrottleRequest}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_ThrottleRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_ThrottleRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getTimedQuotaFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        type_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType.REQUEST_NUMBER;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (timedQuotaBuilder_ == null) {
+          timedQuota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+        } else {
+          timedQuotaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_ThrottleRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.type_ = type_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (timedQuotaBuilder_ == null) {
+          result.timedQuota_ = timedQuota_;
+        } else {
+          result.timedQuota_ = timedQuotaBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance()) return this;
+        if (other.hasType()) {
+          setType(other.getType());
+        }
+        if (other.hasTimedQuota()) {
+          mergeTimedQuota(other.getTimedQuota());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (hasTimedQuota()) {
+          if (!getTimedQuota().isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional .ThrottleType type = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType type_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType.REQUEST_NUMBER;
+      /**
+       * optional .ThrottleType type = 1;
+       */
+      public boolean hasType() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * optional .ThrottleType type = 1;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType getType() {
+        return type_;
+      }
+      /**
+       * optional .ThrottleType type = 1;
+       */
+      public Builder setType(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        type_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional .ThrottleType type = 1;
+       */
+      public Builder clearType() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        type_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType.REQUEST_NUMBER;
+        onChanged();
+        return this;
+      }
+
+      // optional .TimedQuota timed_quota = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota timedQuota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> timedQuotaBuilder_;
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public boolean hasTimedQuota() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota getTimedQuota() {
+        if (timedQuotaBuilder_ == null) {
+          return timedQuota_;
+        } else {
+          return timedQuotaBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public Builder setTimedQuota(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (timedQuotaBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          timedQuota_ = value;
+          onChanged();
+        } else {
+          timedQuotaBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public Builder setTimedQuota(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder builderForValue) {
+        if (timedQuotaBuilder_ == null) {
+          timedQuota_ = builderForValue.build();
+          onChanged();
+        } else {
+          timedQuotaBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public Builder mergeTimedQuota(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota value) {
+        if (timedQuotaBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              timedQuota_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
+            timedQuota_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(timedQuota_).mergeFrom(value).buildPartial();
+          } else {
+            timedQuota_ = value;
+          }
+          onChanged();
+        } else {
+          timedQuotaBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public Builder clearTimedQuota() {
+        if (timedQuotaBuilder_ == null) {
+          timedQuota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          timedQuotaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder getTimedQuotaBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getTimedQuotaFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder getTimedQuotaOrBuilder() {
+        if (timedQuotaBuilder_ != null) {
+          return timedQuotaBuilder_.getMessageOrBuilder();
+        } else {
+          return timedQuota_;
+        }
+      }
+      /**
+       * optional .TimedQuota timed_quota = 2;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+          getTimedQuotaFieldBuilder() {
+        if (timedQuotaBuilder_ == null) {
+          timedQuotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>(
+                  timedQuota_,
+                  getParentForChildren(),
+                  isClean());
+          timedQuota_ = null;
+        }
+        return timedQuotaBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:ThrottleRequest)
+    }
+
+    static {
+      defaultInstance = new ThrottleRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:ThrottleRequest)
+  }
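+
+  /* Usage sketch (illustrative, not part of the generated file): building a
+   * ThrottleRequest through the Builder API above. The TimedQuota setters
+   * (setTimeUnit, setSoftLimit) are assumed from the "TimeUnit"/"SoftLimit"
+   * fields wired into its accessor table further below.
+   *
+   *   QuotaProtos.TimedQuota tq = QuotaProtos.TimedQuota.newBuilder()
+   *       .setTimeUnit(HBaseProtos.TimeUnit.SECONDS) // time_unit is required
+   *       .setSoftLimit(1000)                        // e.g. 1000 requests/sec
+   *       .build();
+   *   QuotaProtos.ThrottleRequest req = QuotaProtos.ThrottleRequest.newBuilder()
+   *       .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER)
+   *       .setTimedQuota(tq)
+   *       .build();
+   */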
+
+  public interface QuotasOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional bool bypass_globals = 1 [default = false];
+    /**
+     * optional bool bypass_globals = 1 [default = false];
+     */
+    boolean hasBypassGlobals();
+    /**
+     * optional bool bypass_globals = 1 [default = false];
+     */
+    boolean getBypassGlobals();
+
+    // optional .Throttle throttle = 2;
+    /**
+     * optional .Throttle throttle = 2;
+     */
+    boolean hasThrottle();
+    /**
+     * optional .Throttle throttle = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle getThrottle();
+    /**
+     * optional .Throttle throttle = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder getThrottleOrBuilder();
+  }
+  /**
+   * Protobuf type {@code Quotas}
+   */
+  public static final class Quotas extends
+      com.google.protobuf.GeneratedMessage
+      implements QuotasOrBuilder {
+    // Use Quotas.newBuilder() to construct.
+    private Quotas(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private Quotas(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final Quotas defaultInstance;
+    public static Quotas getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public Quotas getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private Quotas(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              bypassGlobals_ = input.readBool();
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = throttle_.toBuilder();
+              }
+              throttle_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(throttle_);
+                throttle_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Quotas_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Quotas_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<Quotas> PARSER =
+        new com.google.protobuf.AbstractParser<Quotas>() {
+      public Quotas parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new Quotas(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<Quotas> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional bool bypass_globals = 1 [default = false];
+    public static final int BYPASS_GLOBALS_FIELD_NUMBER = 1;
+    private boolean bypassGlobals_;
+    /**
+     * optional bool bypass_globals = 1 [default = false];
+     */
+    public boolean hasBypassGlobals() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * optional bool bypass_globals = 1 [default = false];
+     */
+    public boolean getBypassGlobals() {
+      return bypassGlobals_;
+    }
+
+    // optional .Throttle throttle = 2;
+    public static final int THROTTLE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle throttle_;
+    /**
+     * optional .Throttle throttle = 2;
+     */
+    public boolean hasThrottle() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * optional .Throttle throttle = 2;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle getThrottle() {
+      return throttle_;
+    }
+    /**
+     * optional .Throttle throttle = 2;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder getThrottleOrBuilder() {
+      return throttle_;
+    }
+
+    private void initFields() {
+      bypassGlobals_ = false;
+      throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (hasThrottle()) {
+        if (!getThrottle().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, bypassGlobals_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, throttle_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, bypassGlobals_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, throttle_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas) obj;
+
+      boolean result = true;
+      result = result && (hasBypassGlobals() == other.hasBypassGlobals());
+      if (hasBypassGlobals()) {
+        result = result && (getBypassGlobals()
+            == other.getBypassGlobals());
+      }
+      result = result && (hasThrottle() == other.hasThrottle());
+      if (hasThrottle()) {
+        result = result && getThrottle()
+            .equals(other.getThrottle());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasBypassGlobals()) {
+        hash = (37 * hash) + BYPASS_GLOBALS_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getBypassGlobals());
+      }
+      if (hasThrottle()) {
+        hash = (37 * hash) + THROTTLE_FIELD_NUMBER;
+        hash = (53 * hash) + getThrottle().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code Quotas}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotasOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Quotas_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Quotas_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getThrottleFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        bypassGlobals_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (throttleBuilder_ == null) {
+          throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance();
+        } else {
+          throttleBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_Quotas_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas build() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.bypassGlobals_ = bypassGlobals_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (throttleBuilder_ == null) {
+          result.throttle_ = throttle_;
+        } else {
+          result.throttle_ = throttleBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas.getDefaultInstance()) return this;
+        if (other.hasBypassGlobals()) {
+          setBypassGlobals(other.getBypassGlobals());
+        }
+        if (other.hasThrottle()) {
+          mergeThrottle(other.getThrottle());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (hasThrottle()) {
+          if (!getThrottle().isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional bool bypass_globals = 1 [default = false];
+      private boolean bypassGlobals_ ;
+      /**
+       * optional bool bypass_globals = 1 [default = false];
+       */
+      public boolean hasBypassGlobals() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * optional bool bypass_globals = 1 [default = false];
+       */
+      public boolean getBypassGlobals() {
+        return bypassGlobals_;
+      }
+      /**
+       * optional bool bypass_globals = 1 [default = false];
+       */
+      public Builder setBypassGlobals(boolean value) {
+        bitField0_ |= 0x00000001;
+        bypassGlobals_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional bool bypass_globals = 1 [default = false];
+       */
+      public Builder clearBypassGlobals() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        bypassGlobals_ = false;
+        onChanged();
+        return this;
+      }
+
+      // optional .Throttle throttle = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder> throttleBuilder_;
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public boolean hasThrottle() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle getThrottle() {
+        if (throttleBuilder_ == null) {
+          return throttle_;
+        } else {
+          return throttleBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public Builder setThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle value) {
+        if (throttleBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          throttle_ = value;
+          onChanged();
+        } else {
+          throttleBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public Builder setThrottle(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder builderForValue) {
+        if (throttleBuilder_ == null) {
+          throttle_ = builderForValue.build();
+          onChanged();
+        } else {
+          throttleBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public Builder mergeThrottle(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle value) {
+        if (throttleBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              throttle_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance()) {
+            throttle_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.newBuilder(throttle_).mergeFrom(value).buildPartial();
+          } else {
+            throttle_ = value;
+          }
+          onChanged();
+        } else {
+          throttleBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public Builder clearThrottle() {
+        if (throttleBuilder_ == null) {
+          throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance();
+          onChanged();
+        } else {
+          throttleBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder getThrottleBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getThrottleFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder getThrottleOrBuilder() {
+        if (throttleBuilder_ != null) {
+          return throttleBuilder_.getMessageOrBuilder();
+        } else {
+          return throttle_;
+        }
+      }
+      /**
+       * optional .Throttle throttle = 2;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder> 
+          getThrottleFieldBuilder() {
+        if (throttleBuilder_ == null) {
+          throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder>(
+                  throttle_,
+                  getParentForChildren(),
+                  isClean());
+          throttle_ = null;
+        }
+        return throttleBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:Quotas)
+    }
+
+    static {
+      defaultInstance = new Quotas(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:Quotas)
+  }
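+
+  /* Usage sketch (illustrative, not part of the generated file): a Quotas
+   * message round-trips through the parse methods above; toByteArray() is
+   * inherited from the protobuf runtime (AbstractMessageLite).
+   *
+   *   QuotaProtos.Quotas quotas = QuotaProtos.Quotas.newBuilder()
+   *       .setBypassGlobals(true)
+   *       .build();
+   *   QuotaProtos.Quotas copy = QuotaProtos.Quotas.parseFrom(quotas.toByteArray());
+   *   assert copy.getBypassGlobals();
+   */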
+
+  public interface QuotaUsageOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code QuotaUsage}
+   */
+  public static final class QuotaUsage extends
+      com.google.protobuf.GeneratedMessage
+      implements QuotaUsageOrBuilder {
+    // Use QuotaUsage.newBuilder() to construct.
+    private QuotaUsage(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private QuotaUsage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final QuotaUsage defaultInstance;
+    public static QuotaUsage getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public QuotaUsage getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private QuotaUsage(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_QuotaUsage_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_QuotaUsage_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<QuotaUsage> PARSER =
+        new com.google.protobuf.AbstractParser<QuotaUsage>() {
+      public QuotaUsage parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new QuotaUsage(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<QuotaUsage> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code QuotaUsage}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsageOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_QuotaUsage_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_QuotaUsage_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_QuotaUsage_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage build() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaUsage) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:QuotaUsage)
+    }
+
+    static {
+      defaultInstance = new QuotaUsage(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:QuotaUsage)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_TimedQuota_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_TimedQuota_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_Throttle_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_Throttle_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_ThrottleRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_ThrottleRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_Quotas_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_Quotas_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_QuotaUsage_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_QuotaUsage_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\013Quota.proto\032\013HBase.proto\"r\n\nTimedQuota" +
+      "\022\034\n\ttime_unit\030\001 \002(\0162\t.TimeUnit\022\022\n\nsoft_l" +
+      "imit\030\002 \001(\004\022\r\n\005share\030\003 \001(\002\022#\n\005scope\030\004 \001(\016" +
+      "2\013.QuotaScope:\007MACHINE\"\307\001\n\010Throttle\022\034\n\007r" +
+      "eq_num\030\001 \001(\0132\013.TimedQuota\022\035\n\010req_size\030\002 " +
+      "\001(\0132\013.TimedQuota\022\036\n\twrite_num\030\003 \001(\0132\013.Ti" +
+      "medQuota\022\037\n\nwrite_size\030\004 \001(\0132\013.TimedQuot" +
+      "a\022\035\n\010read_num\030\005 \001(\0132\013.TimedQuota\022\036\n\tread" +
+      "_size\030\006 \001(\0132\013.TimedQuota\"P\n\017ThrottleRequ" +
+      "est\022\033\n\004type\030\001 \001(\0162\r.ThrottleType\022 \n\013time",
+      "d_quota\030\002 \001(\0132\013.TimedQuota\"D\n\006Quotas\022\035\n\016" +
+      "bypass_globals\030\001 \001(\010:\005false\022\033\n\010throttle\030" +
+      "\002 \001(\0132\t.Throttle\"\014\n\nQuotaUsage*&\n\nQuotaS" +
+      "cope\022\013\n\007CLUSTER\020\001\022\013\n\007MACHINE\020\002*v\n\014Thrott" +
+      "leType\022\022\n\016REQUEST_NUMBER\020\001\022\020\n\014REQUEST_SI" +
+      "ZE\020\002\022\020\n\014WRITE_NUMBER\020\003\022\016\n\nWRITE_SIZE\020\004\022\017" +
+      "\n\013READ_NUMBER\020\005\022\r\n\tREAD_SIZE\020\006*\031\n\tQuotaT" +
+      "ype\022\014\n\010THROTTLE\020\001BA\n*org.apache.hadoop.h" +
+      "base.protobuf.generatedB\013QuotaProtosH\001\210\001" +
+      "\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_TimedQuota_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_TimedQuota_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_TimedQuota_descriptor,
+              new java.lang.String[] { "TimeUnit", "SoftLimit", "Share", "Scope", });
+          internal_static_Throttle_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_Throttle_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_Throttle_descriptor,
+              new java.lang.String[] { "ReqNum", "ReqSize", "WriteNum", "WriteSize", "ReadNum", "ReadSize", });
+          internal_static_ThrottleRequest_descriptor =
+            getDescriptor().getMessageTypes().get(2);
+          internal_static_ThrottleRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_ThrottleRequest_descriptor,
+              new java.lang.String[] { "Type", "TimedQuota", });
+          internal_static_Quotas_descriptor =
+            getDescriptor().getMessageTypes().get(3);
+          internal_static_Quotas_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_Quotas_descriptor,
+              new java.lang.String[] { "BypassGlobals", "Throttle", });
+          internal_static_QuotaUsage_descriptor =
+            getDescriptor().getMessageTypes().get(4);
+          internal_static_QuotaUsage_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_QuotaUsage_descriptor,
+              new java.lang.String[] { });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index 24941ff..64e5cb9 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -179,6 +179,16 @@ message ProcedureDescription {
 message EmptyMsg {
 }
 
+enum TimeUnit {
+  NANOSECONDS = 1;
+  MICROSECONDS = 2;
+  MILLISECONDS = 3;
+  SECONDS = 4;
+  MINUTES = 5;
+  HOURS = 6;
+  DAYS = 7;
+}
+
 message LongMsg {
   required int64 long_msg = 1;
 }
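
The new TimeUnit enum deliberately mirrors java.util.concurrent.TimeUnit so that throttle intervals can round-trip between the client API and the wire format. The standalone mapping below is an illustrative sketch of such a translation; it is my own helper written for this note, not code from the patch:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    final class TimeUnitMapping {
      // Translate the proto enum to java.util.concurrent.TimeUnit case by case.
      static TimeUnit toJava(HBaseProtos.TimeUnit proto) {
        switch (proto) {
          case NANOSECONDS:  return TimeUnit.NANOSECONDS;
          case MICROSECONDS: return TimeUnit.MICROSECONDS;
          case MILLISECONDS: return TimeUnit.MILLISECONDS;
          case SECONDS:      return TimeUnit.SECONDS;
          case MINUTES:      return TimeUnit.MINUTES;
          case HOURS:        return TimeUnit.HOURS;
          case DAYS:         return TimeUnit.DAYS;
          default: throw new IllegalArgumentException("unknown time unit " + proto);
        }
      }
    }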
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 170a326..7b76b65 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -28,6 +28,7 @@ option optimize_for = SPEED;
 import "HBase.proto";
 import "Client.proto";
 import "ClusterStatus.proto";
+import "Quota.proto";
 
 /* Column-level protobufs */
 
@@ -364,6 +365,20 @@ message IsProcedureDoneResponse {
         optional ProcedureDescription snapshot = 2;
 }
 
+message SetQuotaRequest {
+  optional string user_name = 1;
+  optional string user_group = 2;
+  optional string namespace = 3;
+  optional TableName table_name = 4;
+
+  optional bool remove_all = 5;
+  optional bool bypass_globals = 6;
+  optional ThrottleRequest throttle = 7;
+}
+
+message SetQuotaResponse {
+}
+
 message MajorCompactionTimestampRequest {
   required TableName table_name = 1;
 }
@@ -584,6 +599,9 @@ service MasterService {
   rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
     returns(ListTableNamesByNamespaceResponse);
 
+  /** Apply the new quota settings */
+  rpc SetQuota(SetQuotaRequest) returns(SetQuotaResponse);
+
   /** Returns the timestamp of the last major compaction */
   rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest)
     returns(MajorCompactionTimestampResponse);
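
For orientation, the client-side path to this RPC goes through the Admin.setQuota() API together with the QuotaSettingsFactory helpers that HBASE-11598 introduces. A hedged usage sketch, with the helper signatures assumed from that API:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public class SetQuotaExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Throttle user "u1" to 10 requests/sec; under the hood this builds
          // a SetQuotaRequest carrying a ThrottleRequest and calls SetQuota.
          admin.setQuota(QuotaSettingsFactory.throttleUser(
              "u1", ThrottleType.REQUEST_NUMBER, 10, TimeUnit.SECONDS));
          // Removing the throttle sends a ThrottleRequest with no TimedQuota,
          // which the master treats as "clear" (see applyThrottle below).
          admin.setQuota(QuotaSettingsFactory.unthrottleUser("u1"));
        }
      }
    }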
diff --git a/hbase-protocol/src/main/protobuf/Quota.proto b/hbase-protocol/src/main/protobuf/Quota.proto
new file mode 100644
index 0000000..6ef15fe
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/Quota.proto
@@ -0,0 +1,73 @@
+ /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "QuotaProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+enum QuotaScope {
+  CLUSTER = 1;
+  MACHINE = 2;
+}
+
+message TimedQuota {
+  required TimeUnit time_unit = 1;
+  optional uint64 soft_limit  = 2;
+  optional float share = 3;
+  optional QuotaScope scope  = 4 [default = MACHINE];
+}
+
+enum ThrottleType {
+  REQUEST_NUMBER = 1;
+  REQUEST_SIZE   = 2;
+  WRITE_NUMBER   = 3;
+  WRITE_SIZE     = 4;
+  READ_NUMBER    = 5;
+  READ_SIZE      = 6;
+}
+
+message Throttle {
+  optional TimedQuota req_num  = 1;
+  optional TimedQuota req_size = 2;
+
+  optional TimedQuota write_num  = 3;
+  optional TimedQuota write_size = 4;
+
+  optional TimedQuota read_num  = 5;
+  optional TimedQuota read_size = 6;
+}
+
+message ThrottleRequest {
+  optional ThrottleType type = 1;
+  optional TimedQuota timed_quota = 2;
+}
+
+enum QuotaType {
+  THROTTLE = 1;
+}
+
+message Quotas {
+  optional bool bypass_globals = 1 [default = false];
+  optional Throttle throttle = 2;
+}
+
+message QuotaUsage {
+}
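
The message nesting above maps directly onto the generated builders. A minimal sketch constructing the Quotas payload that ultimately gets stored in the quota table, using only the standard protobuf-generated API implied by the .proto declarations:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;

    public class QuotaProtoExample {
      public static void main(String[] args) {
        // 100 requests per second, enforced per machine (the default scope).
        TimedQuota perSec = TimedQuota.newBuilder()
            .setTimeUnit(TimeUnit.SECONDS)
            .setSoftLimit(100)
            .setScope(QuotaScope.MACHINE)
            .build();
        Quotas quotas = Quotas.newBuilder()
            .setThrottle(Throttle.newBuilder().setReqNum(perSec))
            .build();
        System.out.println(quotas);
      }
    }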
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index 98c0563..49f21d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 
 import java.io.IOException;
 import java.util.List;
@@ -467,4 +468,54 @@ public abstract class BaseMasterAndRegionObserver extends BaseRegionObserver
   public void postTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
       TableName tableName) throws IOException {
   }
+
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final String namespace, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final String namespace, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String namespace, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String namespace, final Quotas quotas) throws IOException {
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 4748a1b..99a8552 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 
 import java.io.IOException;
 import java.util.List;
@@ -462,4 +463,53 @@ public class BaseMasterObserver implements MasterObserver {
       TableName tableName) throws IOException {
   }
 
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final String namespace, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final String namespace, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableName tableName, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String namespace, final Quotas quotas) throws IOException {
+  }
+
+  @Override
+  public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String namespace, final Quotas quotas) throws IOException {
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 2d99754..5dc50da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 
 /**
  * Defines coprocessor hooks for interacting with operations on the
@@ -842,4 +843,108 @@ public interface MasterObserver extends Coprocessor {
    */
  void postTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      final TableName tableName) throws IOException;
+
+  /**
+   * Called before the quota for the user is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param userName the name of the user
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final Quotas quotas) throws IOException;
+
+  /**
+   * Called after the quota for the user is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param userName the name of the user
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final Quotas quotas) throws IOException;
+
+  /**
+   * Called before the quota for the user on the specified table is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param userName the name of the user
+   * @param tableName the name of the table
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final TableName tableName, final Quotas quotas) throws IOException;
+
+  /**
+   * Called after the quota for the user on the specified table is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param userName the name of the user
+   * @param tableName the name of the table
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final TableName tableName, final Quotas quotas) throws IOException;
+
+  /**
+   * Called before the quota for the user on the specified namespace is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param userName the name of the user
+   * @param namespace the name of the namespace
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final String namespace, final Quotas quotas) throws IOException;
+
+  /**
+   * Called after the quota for the user on the specified namespace is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param userName the name of the user
+   * @param namespace the name of the namespace
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final String namespace, final Quotas quotas) throws IOException;
+
+  /**
+   * Called before the quota for the table is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param tableName the name of the table
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableName tableName, final Quotas quotas) throws IOException;
+
+  /**
+   * Called after the quota for the table is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param tableName the name of the table
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void postSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableName tableName, final Quotas quotas) throws IOException;
+
+  /**
+   * Called before the quota for the namespace is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param namespace the name of the namespace
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String namespace, final Quotas quotas) throws IOException;
+
+  /**
+   * Called after the quota for the namespace is stored.
+   * @param ctx the environment to interact with the framework and master
+   * @param namespace the name of the namespace
+   * @param quotas the quota settings
+   * @throws IOException
+   */
+  void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String namespace, final Quotas quotas) throws IOException;
 }
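
These hooks let a coprocessor audit or veto quota changes before they are persisted. A minimal sketch of a custom observer; the class and its policy are illustrative only, not part of the patch, and it would be loaded through the hbase.coprocessor.master.classes property:

    import java.io.IOException;
    import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;

    public class QuotaPolicyObserver extends BaseMasterObserver {
      // Veto quota changes for a reserved account by throwing from the pre-hook;
      // the master aborts the operation and never stores the new Quotas.
      @Override
      public void preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String userName, Quotas quotas) throws IOException {
        if ("hbase".equals(userName)) {
          throw new IOException("quota changes for 'hbase' are not allowed");
        }
      }
    }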
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 982815d..b894991 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -519,6 +519,10 @@ public class RpcServer implements RpcServerInterface {
         this.responder.doRespond(this);
       }
     }
+
+    public UserGroupInformation getRemoteUser() {
+      return connection.user;
+    }
   }
 
   /** Listens on the socket. Creates jobs for the handler threads*/
@@ -2414,6 +2418,7 @@ public class RpcServer implements RpcServerInterface {
     }
   }
 
+  @Override
   public RpcScheduler getScheduler() {
     return scheduler;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
index b133ed6..013d256 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
@@ -73,4 +73,6 @@ public interface RpcServerInterface {
    */
   @VisibleForTesting
   void refreshAuthManager(PolicyProvider pp);
+
+  RpcScheduler getScheduler();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 0e654b3..35d466f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -110,6 +110,7 @@ import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
@@ -291,6 +292,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   SnapshotManager snapshotManager;
   // monitor for distributed procedures
   MasterProcedureManagerHost mpmHost;
+
+  private MasterQuotaManager quotaManager;
 
   /** flag used in test cases in order to simulate RS failures during master initialization */
   private volatile boolean initializationBeforeMetaAssignment = false;
@@ -722,6 +725,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
     status.setStatus("Starting namespace manager");
     initNamespace();
+
+    status.setStatus("Starting quota manager");
+    initQuotaManager();
 
     if (this.cpHost != null) {
       try {
@@ -762,6 +768,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     zombieDetector.interrupt();
   }
 
+  private void initQuotaManager() throws IOException {
+    quotaManager = new MasterQuotaManager(this);
+    quotaManager.start();
+  }
+
   /**
    * Create a {@link ServerManager} instance.
    * @param master
@@ -1064,6 +1075,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     // Clean up and close up shop
     if (this.logCleaner != null) this.logCleaner.cancel(true);
     if (this.hfileCleaner != null) this.hfileCleaner.cancel(true);
+    if (this.quotaManager != null) this.quotaManager.stop();
     if (this.activeMasterManager != null) this.activeMasterManager.stop();
     if (this.serverManager != null) this.serverManager.stop();
     if (this.assignmentManager != null) this.assignmentManager.stop();
@@ -1855,6 +1867,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   public MasterCoprocessorHost getMasterCoprocessorHost() {
     return cpHost;
   }
+
+  @Override
+  public MasterQuotaManager getMasterQuotaManager() {
+    return quotaManager;
+  }
 
   @Override
   public ServerName getServerName() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 0fffdab..547c692 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.coprocessor.*;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 
 import java.io.IOException;
 import java.util.List;
@@ -930,6 +931,110 @@ public class MasterCoprocessorHost
       }
     });
   }
+
+  public void preSetUserQuota(final String user, final Quotas quotas) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preSetUserQuota(ctx, user, quotas);
+      }
+    });
+  }
+
+  public void postSetUserQuota(final String user, final Quotas quotas) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postSetUserQuota(ctx, user, quotas);
+      }
+    });
+  }
+
+  public void preSetUserQuota(final String user, final TableName table, final Quotas quotas)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preSetUserQuota(ctx, user, table, quotas);
+      }
+    });
+  }
+
+  public void postSetUserQuota(final String user, final TableName table, final Quotas quotas)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postSetUserQuota(ctx, user, table, quotas);
+      }
+    });
+  }
+
+  public void preSetUserQuota(final String user, final String namespace, final Quotas quotas)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preSetUserQuota(ctx, user, namespace, quotas);
+      }
+    });
+  }
+
+  public void postSetUserQuota(final String user, final String namespace, final Quotas quotas)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postSetUserQuota(ctx, user, namespace, quotas);
+      }
+    });
+  }
+
+  public void preSetTableQuota(final TableName table, final Quotas quotas) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preSetTableQuota(ctx, table, quotas);
+      }
+    });
+  }
+
+  public void postSetTableQuota(final TableName table, final Quotas quotas) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postSetTableQuota(ctx, table, quotas);
+      }
+    });
+  }
+
+  public void preSetNamespaceQuota(final String namespace, final Quotas quotas) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preSetNamespaceQuota(ctx, namespace, quotas);
+      }
+    });
+  }
+
+  public void postSetNamespaceQuota(final String namespace, final Quotas quotas) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postSetNamespaceQuota(ctx, namespace, quotas);
+      }
+    });
+  }
 
   private static abstract class CoprocessorOperation
        extends ObserverContext<MasterCoprocessorEnvironment> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 7b05133..67c3df8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -127,6 +127,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanReq
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
@@ -1279,4 +1281,14 @@ public class MasterRpcServices extends RSRpcServices
     }
     return response.build();
   }
+
+  @Override
+  public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException {
+    try {
+      master.checkInitialized();
+      return master.getMasterQuotaManager().setQuota(req);
+    } catch (Exception e) {
+      throw new ServiceException(e);
+    }
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 458e53c..dbe7b68 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 
 import com.google.protobuf.Service;
 
@@ -266,4 +267,10 @@ public interface MasterServices extends Server {
    * @throws IOException
    */
   public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
+
+  /**
+   * @return Master's instance of {@link MasterQuotaManager}
+   */
+  MasterQuotaManager getMasterQuotaManager();
+
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java
new file mode 100644
index 0000000..34c749e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.quotas.OperationQuota.AvgOperationSize;
+import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DefaultOperationQuota implements OperationQuota {
+  private static final Log LOG = LogFactory.getLog(DefaultOperationQuota.class);
+
+  private final List<QuotaLimiter> limiters;
+  private long writeAvailable = 0;
+  private long readAvailable = 0;
+  private long writeConsumed = 0;
+  private long readConsumed = 0;
+
+  private AvgOperationSize avgOpSize = new AvgOperationSize();
+
+  public DefaultOperationQuota(final QuotaLimiter... limiters) {
+    this(Arrays.asList(limiters));
+  }
+
+  /**
+   * NOTE: The order matters. It should be something like [user, table, namespace, global]
+   */
+  public DefaultOperationQuota(final List<QuotaLimiter> limiters) {
+    this.limiters = limiters;
+  }
+
+  @Override
+  public void checkQuota(int numWrites, int numReads, int numScans)
+      throws ThrottlingException {
+    writeConsumed = estimateConsume(OperationType.MUTATE, numWrites, 100);
+    readConsumed  = estimateConsume(OperationType.GET, numReads, 100);
+    readConsumed += estimateConsume(OperationType.SCAN, numScans, 1000);
+
+    writeAvailable = Long.MAX_VALUE;
+    readAvailable = Long.MAX_VALUE;
+    for (final QuotaLimiter limiter: limiters) {
+      if (limiter.isBypass()) continue;
+
+      limiter.checkQuota(writeConsumed, readConsumed);
+      readAvailable = Math.min(readAvailable, limiter.getReadAvailable());
+      writeAvailable = Math.min(writeAvailable, limiter.getWriteAvailable());
+    }
+
+    for (final QuotaLimiter limiter: limiters) {
+      limiter.grabQuota(writeConsumed, readConsumed);
+    }
+  }
+
+  @Override
+  public void close() {
+    // Calculate and set the average size of get, scan and mutate for the current operation
+    long getSize = avgOpSize.getAvgOperationSize(OperationType.GET);
+    long scanSize = avgOpSize.getAvgOperationSize(OperationType.SCAN);
+    long mutationSize = avgOpSize.getAvgOperationSize(OperationType.MUTATE);
+    for (final QuotaLimiter limiter: limiters) {
+      limiter.addOperationSize(OperationType.GET, getSize);
+      limiter.addOperationSize(OperationType.SCAN, scanSize);
+      limiter.addOperationSize(OperationType.MUTATE, mutationSize);
+    }
+
+    // Adjust the quota consumed for the specified operation
+    long writeDiff = avgOpSize.getOperationSize(OperationType.MUTATE) - writeConsumed;
+    long readDiff = (avgOpSize.getOperationSize(OperationType.GET) +
+                     avgOpSize.getOperationSize(OperationType.SCAN)) - readConsumed;
+    for (final QuotaLimiter limiter: limiters) {
+      if (writeDiff != 0) limiter.consumeWrite(writeDiff);
+      if (readDiff != 0) limiter.consumeRead(readDiff);
+    }
+  }
+
+  @Override
+  public long getReadAvailable() {
+    return readAvailable;
+  }
+
+  @Override
+  public long getWriteAvailable() {
+    return writeAvailable;
+  }
+
+  @Override
+  public void addGetResult(final Result result) {
+    avgOpSize.addGetResult(result);
+  }
+
+  @Override
+  public void addScanResult(final List<Result> results) {
+    avgOpSize.addScanResult(results);
+  }
+
+  @Override
+  public void addMutation(final Mutation mutation) {
+    avgOpSize.addMutation(mutation);
+  }
+
+  @Override
+  public long getAvgOperationSize(OperationType type) {
+    return avgOpSize.getAvgOperationSize(type);
+  }
+
+  private long estimateConsume(final OperationType type, int numReqs, long avgSize) {
+    if (numReqs > 0) {
+      for (final QuotaLimiter limiter: limiters) {
+        long size = limiter.getAvgOperationSize(type);
+        if (size > 0) {
+          avgSize = size;
+          break;
+        }
+      }
+      return avgSize * numReqs;
+    }
+    return 0;
+  }
+}
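
The intended lifecycle is: estimate and reserve with checkQuota(), run the operation, feed back the actual sizes, then close() to reconcile the estimate against reality. A hedged sketch, assuming QuotaLimiter instances are already at hand (in the real flow they come from the QuotaCache, ordered user first as the constructor note above requires):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.quotas.DefaultOperationQuota;
    import org.apache.hadoop.hbase.quotas.OperationQuota;
    import org.apache.hadoop.hbase.quotas.QuotaLimiter;

    final class OperationQuotaSketch {
      static Result throttledGet(Table table, Get get,
          QuotaLimiter userLimiter, QuotaLimiter tableLimiter) throws IOException {
        OperationQuota quota = new DefaultOperationQuota(userLimiter, tableLimiter);
        quota.checkQuota(0, 1, 0);    // one small read; ThrottlingException if over quota
        try {
          Result result = table.get(get);
          quota.addGetResult(result); // record the real size for future estimates
          return result;
        } finally {
          quota.close();              // reconcile estimated vs. actual consumption
        }
      }
    }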
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
new file mode 100644
index 0000000..6a57156
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -0,0 +1,426 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+import java.util.HashSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
+
+/**
+ * Master Quota Manager.
+ * It is responsible for initializing the quota table on first run and
+ * for providing the admin operations used to interact with the quota table.
+ *
+ * TODO: FUTURE: The master will be responsible for notifying each RS of quota changes
+ * and it will do the "quota aggregation" when the QuotaScope is CLUSTER.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class MasterQuotaManager {
+  private static final Log LOG = LogFactory.getLog(MasterQuotaManager.class);
+
+  private final MasterServices masterServices;
+  private NamedLock<String> namespaceLocks;
+  private NamedLock<TableName> tableLocks;
+  private NamedLock<String> userLocks;
+  private boolean enabled = false;
+
+  public MasterQuotaManager(final MasterServices masterServices) {
+    this.masterServices = masterServices;
+  }
+
+  public void start() throws IOException {
+    // If the user doesn't want quota support, skip all the initializations.
+    if (!QuotaUtil.isQuotaEnabled(masterServices.getConfiguration())) {
+      LOG.info("Quota support disabled");
+      return;
+    }
+
+    // Create the quota table if missing
+    if (!MetaTableAccessor.tableExists(masterServices.getConnection(),
+          QuotaUtil.QUOTA_TABLE_NAME)) {
+      LOG.info("Quota table not found. Creating...");
+      createQuotaTable();
+    }
+
+    LOG.info("Initializing quota support");
+    namespaceLocks = new NamedLock<String>();
+    tableLocks = new NamedLock<TableName>();
+    userLocks = new NamedLock<String>();
+
+    enabled = true;
+  }
+
+  public void stop() {
+  }
+
+  public boolean isQuotaEnabled() {
+    return enabled;
+  }
+
+  /* ==========================================================================
+   *  Admin operations to manage the quota table
+   */
+  public SetQuotaResponse setQuota(final SetQuotaRequest req)
+      throws IOException, InterruptedException {
+    checkQuotaSupport();
+
+    if (req.hasUserName()) {
+      userLocks.lock(req.getUserName());
+      try {
+        if (req.hasTableName()) {
+          setUserQuota(req.getUserName(), ProtobufUtil.toTableName(req.getTableName()), req);
+        } else if (req.hasNamespace()) {
+          setUserQuota(req.getUserName(), req.getNamespace(), req);
+        } else {
+          setUserQuota(req.getUserName(), req);
+        }
+      } finally {
+        userLocks.unlock(req.getUserName());
+      }
+    } else if (req.hasTableName()) {
+      TableName table = ProtobufUtil.toTableName(req.getTableName());
+      tableLocks.lock(table);
+      try {
+        setTableQuota(table, req);
+      } finally {
+        tableLocks.unlock(table);
+      }
+    } else if (req.hasNamespace()) {
+      namespaceLocks.lock(req.getNamespace());
+      try {
+        setNamespaceQuota(req.getNamespace(), req);
+      } finally {
+        namespaceLocks.unlock(req.getNamespace());
+      }
+    } else {
+      throw new DoNotRetryIOException(
+        new UnsupportedOperationException("a user, a table or a namespace must be specified"));
+    }
+    return SetQuotaResponse.newBuilder().build();
+  }
+
+  public void setUserQuota(final String userName, final SetQuotaRequest req)
+      throws IOException, InterruptedException {
+    setQuota(req, new SetQuotaOperations() {
+      @Override
+      public Quotas fetch() throws IOException {
+        return QuotaUtil.getUserQuota(masterServices.getConnection(), userName);
+      }
+      @Override
+      public void update(final Quotas quotas) throws IOException {
+        QuotaUtil.addUserQuota(masterServices.getConnection(), userName, quotas);
+      }
+      @Override
+      public void delete() throws IOException {
+        QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName);
+      }
+      @Override
+      public void preApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().preSetUserQuota(userName, quotas);
+      }
+      @Override
+      public void postApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().postSetUserQuota(userName, quotas);
+      }
+    });
+  }
+
+  public void setUserQuota(final String userName, final TableName table,
+      final SetQuotaRequest req) throws IOException, InterruptedException {
+    setQuota(req, new SetQuotaOperations() {
+      @Override
+      public Quotas fetch() throws IOException {
+        return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, table);
+      }
+      @Override
+      public void update(final Quotas quotas) throws IOException {
+        QuotaUtil.addUserQuota(masterServices.getConnection(), userName, table, quotas);
+      }
+      @Override
+      public void delete() throws IOException {
+        QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, table);
+      }
+      @Override
+      public void preApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().preSetUserQuota(userName, table, quotas);
+      }
+      @Override
+      public void postApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().postSetUserQuota(userName, table, quotas);
+      }
+    });
+  }
+
+  public void setUserQuota(final String userName, final String namespace,
+      final SetQuotaRequest req) throws IOException, InterruptedException {
+    setQuota(req, new SetQuotaOperations() {
+      @Override
+      public Quotas fetch() throws IOException {
+        return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, namespace);
+      }
+      @Override
+      public void update(final Quotas quotas) throws IOException {
+        QuotaUtil.addUserQuota(masterServices.getConnection(), userName, namespace, quotas);
+      }
+      @Override
+      public void delete() throws IOException {
+        QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, namespace);
+      }
+      @Override
+      public void preApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().preSetUserQuota(userName, namespace, quotas);
+      }
+      @Override
+      public void postApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().postSetUserQuota(userName, namespace, quotas);
+      }
+    });
+  }
+
+  public void setTableQuota(final TableName table, final SetQuotaRequest req)
+      throws IOException, InterruptedException {
+    setQuota(req, new SetQuotaOperations() {
+      @Override
+      public Quotas fetch() throws IOException {
+        return QuotaUtil.getTableQuota(masterServices.getConnection(), table);
+      }
+      @Override
+      public void update(final Quotas quotas) throws IOException {
+        QuotaUtil.addTableQuota(masterServices.getConnection(), table, quotas);
+      }
+      @Override
+      public void delete() throws IOException {
+        QuotaUtil.deleteTableQuota(masterServices.getConnection(), table);
+      }
+      @Override
+      public void preApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().preSetTableQuota(table, quotas);
+      }
+      @Override
+      public void postApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().postSetTableQuota(table, quotas);
+      }
+    });
+  }
+
+  public void setNamespaceQuota(final String namespace, final SetQuotaRequest req)
+      throws IOException, InterruptedException {
+    setQuota(req, new SetQuotaOperations() {
+      @Override
+      public Quotas fetch() throws IOException {
+        return QuotaUtil.getNamespaceQuota(masterServices.getConnection(), namespace);
+      }
+      @Override
+      public void update(final Quotas quotas) throws IOException {
+        QuotaUtil.addNamespaceQuota(masterServices.getConnection(), namespace, quotas);
+      }
+      @Override
+      public void delete() throws IOException {
+        QuotaUtil.deleteNamespaceQuota(masterServices.getConnection(), namespace);
+      }
+      @Override
+      public void preApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().preSetNamespaceQuota(namespace, quotas);
+      }
+      @Override
+      public void postApply(final Quotas quotas) throws IOException {
+        masterServices.getMasterCoprocessorHost().postSetNamespaceQuota(namespace, quotas);
+      }
+    });
+  }
+
+  private void setQuota(final SetQuotaRequest req, final SetQuotaOperations quotaOps)
+      throws IOException, InterruptedException {
+    if (req.hasRemoveAll() && req.getRemoveAll()) {
+      quotaOps.preApply(null);
+      quotaOps.delete();
+      quotaOps.postApply(null);
+      return;
+    }
+
+    // Apply quota changes
+    Quotas quotas = quotaOps.fetch();
+    quotaOps.preApply(quotas);
+
+    Quotas.Builder builder = (quotas != null) ? quotas.toBuilder() : Quotas.newBuilder();
+    if (req.hasThrottle()) applyThrottle(builder, req.getThrottle());
+    if (req.hasBypassGlobals()) applyBypassGlobals(builder, req.getBypassGlobals());
+
+    // Submit new changes
+    quotas = builder.build();
+    if (QuotaUtil.isEmptyQuota(quotas)) {
+      quotaOps.delete();
+    } else {
+      quotaOps.update(quotas);
+    }
+    quotaOps.postApply(quotas);
+  }
+
+  private static interface SetQuotaOperations {
+    Quotas fetch() throws IOException;
+    void delete() throws IOException;
+    void update(final Quotas quotas) throws IOException;
+    void preApply(final Quotas quotas) throws IOException;
+    void postApply(final Quotas quotas) throws IOException;
+  }
+
+  /* ==========================================================================
+   *  Helpers to apply changes to the quotas
+   */
+  private void applyThrottle(final Quotas.Builder quotas, final ThrottleRequest req)
+      throws IOException {
+    Throttle.Builder throttle;
+
+    if (req.hasType() && (req.hasTimedQuota() || quotas.hasThrottle())) {
+      // Validate timed quota if present
+      if (req.hasTimedQuota()) validateTimedQuota(req.getTimedQuota());
+
+      // apply the new settings
+      throttle = quotas.hasThrottle() ? quotas.getThrottle().toBuilder() : Throttle.newBuilder();
+
+      switch (req.getType()) {
+        case REQUEST_NUMBER:
+          if (req.hasTimedQuota()) {
+            throttle.setReqNum(req.getTimedQuota());
+          } else {
+            throttle.clearReqNum();
+          }
+          break;
+        case REQUEST_SIZE:
+          if (req.hasTimedQuota()) {
+            throttle.setReqSize(req.getTimedQuota());
+          } else {
+            throttle.clearReqSize();
+          }
+          break;
+        case WRITE_NUMBER:
+          if (req.hasTimedQuota()) {
+            throttle.setWriteNum(req.getTimedQuota());
+          } else {
+            throttle.clearWriteNum();
+          }
+          break;
+        case WRITE_SIZE:
+          if (req.hasTimedQuota()) {
+            throttle.setWriteSize(req.getTimedQuota());
+          } else {
+            throttle.clearWriteSize();
+          }
+          break;
+        case READ_NUMBER:
+          if (req.hasTimedQuota()) {
+            throttle.setReadNum(req.getTimedQuota());
+          } else {
+            throttle.clearReadNum();
+          }
+          break;
+        case READ_SIZE:
+          if (req.hasTimedQuota()) {
+            throttle.setReadSize(req.getTimedQuota());
+          } else {
+            throttle.clearReadSize();
+          }
+          break;
+      }
+      quotas.setThrottle(throttle.build());
+    } else {
+      quotas.clearThrottle();
+    }
+  }
+
+  private void applyBypassGlobals(final Quotas.Builder quotas, boolean bypassGlobals) {
+    if (bypassGlobals) {
+      quotas.setBypassGlobals(bypassGlobals);
+    } else {
+      quotas.clearBypassGlobals();
+    }
+  }
+
+  private void validateTimedQuota(final TimedQuota timedQuota) throws IOException {
+    if (timedQuota.getSoftLimit() < 1) {
+      throw new DoNotRetryIOException(new UnsupportedOperationException(
+          "The throttle limit must be greater then 0, got " + timedQuota.getSoftLimit()));
+    }
+  }
+
+  /* ==========================================================================
+   *  Helpers
+   */
+
+  private void checkQuotaSupport() throws IOException {
+    if (!enabled) {
+      throw new DoNotRetryIOException(
+        new UnsupportedOperationException("quota support disabled"));
+    }
+  }
+
+  private void createQuotaTable() throws IOException {
+    HRegionInfo newRegions[] = new HRegionInfo[] {
+      new HRegionInfo(QuotaUtil.QUOTA_TABLE_NAME)
+    };
+
+    masterServices.getExecutorService()
+      .submit(new CreateTableHandler(masterServices,
+        masterServices.getMasterFileSystem(),
+        QuotaUtil.QUOTA_TABLE_DESC,
+        masterServices.getConfiguration(),
+        newRegions,
+        masterServices)
+          .prepare());
+  }
+
+  private static class NamedLock<T> {
+    private final HashSet<T> locks = new HashSet<T>();
+
+    public void lock(final T name) throws InterruptedException {
+      synchronized (locks) {
+        while (locks.contains(name)) {
+          locks.wait();
+        }
+        locks.add(name);
+      }
+    }
+
+    public void unlock(final T name) {
+      synchronized (locks) {
+        locks.remove(name);
+        locks.notifyAll();
+      }
+    }
+  }
+}
+
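
The private NamedLock above gives per-name mutual exclusion, so concurrent setQuota calls for different users, tables, or namespaces proceed in parallel while calls for the same entity serialize. A standalone copy of the pattern together with the try/finally bracketing that setQuota uses (illustrative; the patch's class is private to MasterQuotaManager):

    import java.util.HashSet;

    public class NamedLockSketch<T> {
      private final HashSet<T> locks = new HashSet<T>();

      public void lock(T name) throws InterruptedException {
        synchronized (locks) {
          while (locks.contains(name)) {
            locks.wait();         // another thread holds this name; wait for notifyAll
          }
          locks.add(name);
        }
      }

      public void unlock(T name) {
        synchronized (locks) {
          locks.remove(name);
          locks.notifyAll();      // wake waiters so each can re-check its own name
        }
      }

      public static void main(String[] args) throws InterruptedException {
        NamedLockSketch<String> userLocks = new NamedLockSketch<String>();
        userLocks.lock("alice");  // would not block a concurrent lock("bob")
        try {
          // read-modify-write of alice's quota settings happens here
        } finally {
          userLocks.unlock("alice");
        }
      }
    }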
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java
new file mode 100644
index 0000000..e67c7c0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopOperationQuota.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Result;
+
+/**
+ * Noop operation quota returned when no quota is associated with the user/table
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class NoopOperationQuota implements OperationQuota {
+  private static OperationQuota instance = new NoopOperationQuota();
+
+  private NoopOperationQuota() {
+    // no-op
+  }
+
+  public static OperationQuota get() {
+    return instance;
+  }
+
+  @Override
+  public void checkQuota(int numWrites, int numReads, int numScans)
+      throws ThrottlingException {
+    // no-op
+  }
+
+  @Override
+  public void close() {
+    // no-op
+  }
+
+  @Override
+  public void addGetResult(final Result result) {
+    // no-op
+  }
+
+  @Override
+  public void addScanResult(final List<Result> results) {
+    // no-op
+  }
+
+  @Override
+  public void addMutation(final Mutation mutation) {
+    // no-op
+  }
+
+  @Override
+  public long getReadAvailable() {
+    return Long.MAX_VALUE;
+  }
+
+  @Override
+  public long getWriteAvailable() {
+    return Long.MAX_VALUE;
+  }
+
+  @Override
+  public long getAvgOperationSize(OperationType type) {
+    return -1;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java
new file mode 100644
index 0000000..2273dc0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
+
+/**
+ * Noop quota limiter returned when no limiter is associated with the user/table
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class NoopQuotaLimiter implements QuotaLimiter {
+  private static QuotaLimiter instance = new NoopQuotaLimiter();
+
+  private NoopQuotaLimiter() {
+    // no-op
+  }
+
+  @Override
+  public void checkQuota(long estimateWriteSize, long estimateReadSize)
+      throws ThrottlingException {
+    // no-op
+  }
+
+  @Override
+  public void grabQuota(long writeSize, long readSize) {
+    // no-op
+  }
+
+  @Override
+  public void consumeWrite(final long size) {
+    // no-op
+  }
+
+  @Override
+  public void consumeRead(final long size) {
+    // no-op
+  }
+
+  @Override
+  public boolean isBypass() {
+    return true;
+  }
+
+  @Override
+  public long getWriteAvailable() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public long getReadAvailable() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void addOperationSize(OperationType type, long size) {
+  }
+
+  @Override
+  public long getAvgOperationSize(OperationType type) {
+    return -1;
+  }
+
+  @Override
+  public String toString() {
+    return "NoopQuotaLimiter";
+  }
+
+  public static QuotaLimiter get() {
+    return instance;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java
new file mode 100644
index 0000000..b885ac9
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Result;
+
+/**
+ * Interface that allows checking the quota available for an operation.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface OperationQuota {
+  public enum OperationType { MUTATE, GET, SCAN }
+
+  /**
+   * Keeps track of the average data size of operations like get, scan, mutate
+   */
+  public class AvgOperationSize {
+    private final long[] sizeSum;
+    private final long[] count;
+
+    public AvgOperationSize() {
+      int size = OperationType.values().length;
+      sizeSum = new long[size];
+      count = new long[size];
+      for (int i = 0; i < size; ++i) {
+        sizeSum[i] = 0;
+        count[i] = 0;
+      }
+    }
+
+    public void addOperationSize(OperationType type, long size) {
+      if (size > 0) {
+        int index = type.ordinal();
+        sizeSum[index] += size;
+        count[index]++;
+      }
+    }
+
+    public long getAvgOperationSize(OperationType type) {
+      int index = type.ordinal();
+      return count[index] > 0 ? sizeSum[index] / count[index] : 0;
+    }
+
+    public long getOperationSize(OperationType type) {
+      return sizeSum[type.ordinal()];
+    }
+
+    public void addGetResult(final Result result) {
+      long size = QuotaUtil.calculateResultSize(result);
+      addOperationSize(OperationType.GET, size);
+    }
+
+    public void addScanResult(final List<Result> results) {
+      long size = QuotaUtil.calculateResultSize(results);
+      addOperationSize(OperationType.SCAN, size);
+    }
+
+    public void addMutation(final Mutation mutation) {
+      long size = QuotaUtil.calculateMutationSize(mutation);
+      addOperationSize(OperationType.MUTATE, size);
+    }
+  }
+
+  /**
+   * Checks if it is possible to execute the specified operation.
+   * The quota will be estimated based on the number of operations to perform
+   * and the average size accumulated over time.
+   *
+   * @param numWrites number of write operations that will be performed
+   * @param numReads number of small-read operations that will be performed
+   * @param numScans number of long-read operations that will be performed
+   * @throws ThrottlingException if the operation cannot be performed
+   */
+  void checkQuota(int numWrites, int numReads, int numScans)
+    throws ThrottlingException;
+
+  /** Cleanup method on operation completion */
+  void close();
+
+  /**
+   * Add a get result. This will be used to calculate the exact quota and
+   * have a better short-read average size for the next time.
+   */
+  void addGetResult(Result result);
+
+  /**
+   * Add a scan result. This will be used to calculate the exact quota and
+   * have a better long-read average size for the next time.
+   */
+  void addScanResult(List<Result> results);
+
+  /**
+   * Add a mutation result. This will be used to calculate the exact quota and
+   * have a better mutation average size for the next time.
+   */
+  void addMutation(Mutation mutation);
+
+  /** @return the number of bytes available to read to avoid exceeding the quota */
+  long getReadAvailable();
+
+  /** @return the number of bytes available to write to avoid exceeding the quota */
+  long getWriteAvailable();
+
+  /** @return the average data size of the specified operation */
+  long getAvgOperationSize(OperationType type);
+}
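
The interface above implies a check/execute/report/close life-cycle. A minimal
caller-side sketch of that flow, assuming a "quota" instance obtained from a
quota manager and a hypothetical doGet() helper that performs the actual read:

    quota.checkQuota(0, 1, 0);      // 0 writes, 1 small-read, 0 scans; may throw ThrottlingException
    try {
      Result result = doGet();      // execute the actual read (assumed helper)
      quota.addGetResult(result);   // report the real size to refine future estimates
    } finally {
      quota.close();                // settle the estimated quota on completion
    }
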
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
new file mode 100644
index 0000000..15962d2
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
@@ -0,0 +1,326 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Cache that keeps track of the quota settings for the users and tables that
+ * are interacting with it.
+ *
+ * To avoid blocking operations when the requested quota is not in the cache,
+ * an "empty quota" will be returned and the request to fetch the quota information
+ * will be enqueued for the next refresh.
+ *
+ * TODO: At the moment the cache has a chore that will be triggered every 5min
+ * or on cache-miss events. Later the quotas will be pushed using the notification system.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class QuotaCache implements Stoppable {
+  private static final Log LOG = LogFactory.getLog(QuotaCache.class);
+
+  public static final String REFRESH_CONF_KEY = "hbase.quota.refresh.period";
+  private static final int REFRESH_DEFAULT_PERIOD = 5 * 60000; // 5min
+  private static final int EVICT_PERIOD_FACTOR = 5; // N * REFRESH_DEFAULT_PERIOD
+
+  // For testing purposes only: force the cache to always be refreshed
+  static boolean TEST_FORCE_REFRESH = false;
+
+  private final ConcurrentHashMap<String, QuotaState> namespaceQuotaCache =
+      new ConcurrentHashMap<String, QuotaState>();
+  private final ConcurrentHashMap<TableName, QuotaState> tableQuotaCache =
+      new ConcurrentHashMap<TableName, QuotaState>();
+  private final ConcurrentHashMap<String, UserQuotaState> userQuotaCache =
+      new ConcurrentHashMap<String, UserQuotaState>();
+  private final RegionServerServices rsServices;
+
+  private QuotaRefresherChore refreshChore;
+  private boolean stopped = true;
+
+  public QuotaCache(final RegionServerServices rsServices) {
+    this.rsServices = rsServices;
+  }
+
+  public void start() throws IOException {
+    stopped = false;
+
+    // TODO: This will be replaced once we have the notification bus ready.
+    Configuration conf = rsServices.getConfiguration();
+    int period = conf.getInt(REFRESH_CONF_KEY, REFRESH_DEFAULT_PERIOD);
+    refreshChore = new QuotaRefresherChore(period, this);
+    rsServices.getChoreService().scheduleChore(refreshChore);
+  }
+
+  @Override
+  public void stop(final String why) {
+    stopped = true;
+  }
+
+  @Override
+  public boolean isStopped() {
+    return stopped;
+  }
+
+  /**
+   * Returns the limiter associated with the specified user/table.
+   *
+   * @param ugi the user to limit
+   * @param table the table to limit
+   * @return the limiter associated with the specified user/table
+   */
+  public QuotaLimiter getUserLimiter(final UserGroupInformation ugi, final TableName table) {
+    if (table.isSystemTable()) {
+      return NoopQuotaLimiter.get();
+    }
+    return getUserQuotaState(ugi).getTableLimiter(table);
+  }
+
+  /**
+   * Returns the QuotaState associated with the specified user.
+   *
+   * @param ugi the user
+   * @return the quota info associated with the specified user
+   */
+  public UserQuotaState getUserQuotaState(final UserGroupInformation ugi) {
+    String key = ugi.getShortUserName();
+    UserQuotaState quotaInfo = userQuotaCache.get(key);
+    if (quotaInfo == null) {
+      quotaInfo = new UserQuotaState();
+      if (userQuotaCache.putIfAbsent(key, quotaInfo) == null) {
+        triggerCacheRefresh();
+      }
+    }
+    return quotaInfo;
+  }
+
+  /**
+   * Returns the limiter associated with the specified table.
+   *
+   * @param table the table to limit
+   * @return the limiter associated with the specified table
+   */
+  public QuotaLimiter getTableLimiter(final TableName table) {
+    return getQuotaState(this.tableQuotaCache, table).getGlobalLimiter();
+  }
+
+  /**
+   * Returns the limiter associated with the specified namespace.
+   *
+   * @param namespace the namespace to limit
+   * @return the limiter associated with the specified namespace
+   */
+  public QuotaLimiter getNamespaceLimiter(final String namespace) {
+    return getQuotaState(this.namespaceQuotaCache, namespace).getGlobalLimiter();
+  }
+
+  /**
+   * Returns the QuotaState requested.
+   * If the quota info is not in cache an empty one will be returned
+   * and the quota request will be enqueued for the next cache refresh.
+   */
+  private <K> QuotaState getQuotaState(final ConcurrentHashMap<K, QuotaState> quotasMap,
+      final K key) {
+    QuotaState quotaInfo = quotasMap.get(key);
+    if (quotaInfo == null) {
+      quotaInfo = new QuotaState();
+      if (quotasMap.putIfAbsent(key, quotaInfo) == null) {
+        triggerCacheRefresh();
+      }
+    }
+    return quotaInfo;
+  }
+
+  private Configuration getConfiguration() {
+    return rsServices.getConfiguration();
+  }
+
+  @VisibleForTesting
+  void triggerCacheRefresh() {
+    refreshChore.triggerNow();
+  }
+
+  @VisibleForTesting
+  long getLastUpdate() {
+    return refreshChore.lastUpdate;
+  }
+
+  @VisibleForTesting
+  Map<String, QuotaState> getNamespaceQuotaCache() {
+    return namespaceQuotaCache;
+  }
+
+  @VisibleForTesting
+  Map<TableName, QuotaState> getTableQuotaCache() {
+    return tableQuotaCache;
+  }
+
+  @VisibleForTesting
+  Map<String, UserQuotaState> getUserQuotaCache() {
+    return userQuotaCache;
+  }
+
+  // TODO: Remove this once we have the notification bus
+  private class QuotaRefresherChore extends ScheduledChore {
+    private long lastUpdate = 0;
+
+    public QuotaRefresherChore(final int period, final Stoppable stoppable) {
+      super("QuotaRefresherChore", stoppable, period);
+    }
+
+    @Override
+    protected void chore() {
+      // Prefetch online tables/namespaces. Note: containsKey() is the intended
+      // check here; ConcurrentHashMap.contains() tests values, not keys.
+      for (TableName table: QuotaCache.this.rsServices.getOnlineTables()) {
+        if (table.isSystemTable()) continue;
+        if (!QuotaCache.this.tableQuotaCache.containsKey(table)) {
+          QuotaCache.this.tableQuotaCache.putIfAbsent(table, new QuotaState());
+        }
+        String ns = table.getNamespaceAsString();
+        if (!QuotaCache.this.namespaceQuotaCache.containsKey(ns)) {
+          QuotaCache.this.namespaceQuotaCache.putIfAbsent(ns, new QuotaState());
+        }
+      }
+      }
+
+      fetchNamespaceQuotaState();
+      fetchTableQuotaState();
+      fetchUserQuotaState();
+      lastUpdate = EnvironmentEdgeManager.currentTime();
+    }
+
+    private void fetchNamespaceQuotaState() {
+      fetch("namespace", QuotaCache.this.namespaceQuotaCache, new Fetcher() {
+        @Override
+        public Get makeGet(final Map.Entry entry) {
+          return QuotaUtil.makeGetForNamespaceQuotas(entry.getKey());
+        }
+
+        @Override
+        public Map fetchEntries(final List gets)
+            throws IOException {
+          return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), gets);
+        }
+      });
+    }
+
+    private void fetchTableQuotaState() {
+      fetch("table", QuotaCache.this.tableQuotaCache, new Fetcher() {
+        @Override
+        public Get makeGet(final Map.Entry entry) {
+          return QuotaUtil.makeGetForTableQuotas(entry.getKey());
+        }
+
+        @Override
+        public Map fetchEntries(final List gets)
+            throws IOException {
+          return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), gets);
+        }
+      });
+    }
+
+    private void fetchUserQuotaState() {
+      final Set<String> namespaces = QuotaCache.this.namespaceQuotaCache.keySet();
+      final Set<TableName> tables = QuotaCache.this.tableQuotaCache.keySet();
+      fetch("user", QuotaCache.this.userQuotaCache, new Fetcher<String, UserQuotaState>() {
+        @Override
+        public Get makeGet(final Map.Entry<String, UserQuotaState> entry) {
+          return QuotaUtil.makeGetForUserQuotas(entry.getKey(), tables, namespaces);
+        }
+
+        @Override
+        public Map<String, UserQuotaState> fetchEntries(final List<Get> gets)
+            throws IOException {
+          return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), gets);
+        }
+      });
+    }
+
+    private <K, V extends QuotaState> void fetch(final String type,
+        final ConcurrentHashMap<K, V> quotasMap, final Fetcher<K, V> fetcher) {
+      long now = EnvironmentEdgeManager.currentTime();
+      long refreshPeriod = getPeriod();
+      long evictPeriod = refreshPeriod * EVICT_PERIOD_FACTOR;
+
+      // Find the quota entries to update
+      List<Get> gets = new ArrayList<Get>();
+      List<K> toRemove = new ArrayList<K>();
+      for (Map.Entry<K, V> entry: quotasMap.entrySet()) {
+        long lastUpdate = entry.getValue().getLastUpdate();
+        long lastQuery = entry.getValue().getLastQuery();
+        if (lastQuery > 0 && (now - lastQuery) >= evictPeriod) {
+          toRemove.add(entry.getKey());
+        } else if (TEST_FORCE_REFRESH || (now - lastUpdate) >= refreshPeriod) {
+          gets.add(fetcher.makeGet(entry));
+        }
+      }
+
+      for (final K key: toRemove) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("evict " + type + " key=" + key);
+        }
+        quotasMap.remove(key);
+      }
+
+      // fetch and update the quota entries
+      if (!gets.isEmpty()) {
+        try {
+          for (Map.Entry<K, V> entry: fetcher.fetchEntries(gets).entrySet()) {
+            V quotaInfo = quotasMap.putIfAbsent(entry.getKey(), entry.getValue());
+            if (quotaInfo != null) {
+              quotaInfo.update(entry.getValue());
+            }
+
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("refresh " + type + " key=" + entry.getKey() + " quotas=" + quotaInfo);
+            }
+          }
+        } catch (IOException e) {
+          LOG.warn("Unable to read " + type + " from quota table", e);
+        }
+      }
+    }
+  }
+
+  static interface Fetcher<K, V> {
+    Get makeGet(Map.Entry<K, V> entry);
+    Map<K, V> fetchEntries(List<Get> gets) throws IOException;
+  }
+}
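
All the getters above follow the same non-blocking cache-miss pattern: return an
empty (bypass) state immediately and let the refresher chore fetch the real
settings. A minimal standalone sketch of that pattern, assuming a map keyed by
user name:

    ConcurrentHashMap<String, UserQuotaState> cache =
        new ConcurrentHashMap<String, UserQuotaState>();

    UserQuotaState state = cache.get("alice");
    if (state == null) {
      state = new UserQuotaState();                 // empty state behaves as bypass
      if (cache.putIfAbsent("alice", state) == null) {
        // only the first thread to miss triggers the refresh (triggerCacheRefresh());
        // concurrent callers reuse the same stub until the chore fills it in
      }
    }

The caller is never blocked on the quota table; at worst the first few
operations after a miss run unthrottled until the next refresh.
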
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java
new file mode 100644
index 0000000..ffacbc0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
+
+/**
+ * Internal interface used to interact with the user/table quota.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface QuotaLimiter {
+  /**
+   * Checks if it is possible to execute the specified operation.
+   *
+   * @param estimateWriteSize the write size that will be checked against the available quota
+   * @param estimateReadSize the read size that will be checked against the available quota
+   * @throws ThrottlingException thrown if there are not enough available resources to perform the operation.
+   */
+  void checkQuota(long estimateWriteSize, long estimateReadSize)
+    throws ThrottlingException;
+
+  /**
+   * Removes the specified write and read amount from the quota.
+   * At this point the write and read amount will be an estimate
+   * that will later be adjusted with a consumeWrite()/consumeRead() call.
+   *
+   * @param writeSize the write size that will be removed from the current quota
+   * @param readSize the read size that will be removed from the current quota
+   */
+  void grabQuota(long writeSize, long readSize);
+
+  /**
+   * Removes or adds back some write amount to the quota.
+   * (called at the end of an operation in case the estimate quota was off)
+   */
+  void consumeWrite(long size);
+
+  /**
+   * Removes or adds back some read amount to the quota.
+   * (called at the end of an operation in case the estimate quota was off)
+   */
+  void consumeRead(long size);
+
+  /** @return true if the limiter is a noop */
+  boolean isBypass();
+
+  /** @return the number of bytes available to read to avoid exceeding the quota */
+  long getReadAvailable();
+
+  /** @return the number of bytes available to write to avoid exceeding the quota */
+  long getWriteAvailable();
+
+  /**
+   * Add the average size of the specified operation type.
+   * The average will be used as estimate for the next operations.
+   */
+  void addOperationSize(OperationType type, long size);
+
+  /** @return the average data size of the specified operation */
+  long getAvgOperationSize(OperationType type);
+}
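
The grabQuota()/consumeWrite()/consumeRead() trio encodes an estimate-then-adjust
protocol. A sketch of the intended flow for a single mutation, assuming a
"limiter" instance and a hypothetical executeMutation() returning the real size
in bytes:

    long estWrite = limiter.getAvgOperationSize(OperationType.MUTATE); // estimate from history
    limiter.checkQuota(estWrite, 0);          // throws ThrottlingException if over quota
    limiter.grabQuota(estWrite, 0);           // reserve the estimate up front

    long actual = executeMutation();          // do the work (assumed helper)
    limiter.consumeWrite(actual - estWrite);  // settle the difference (may be negative)
    limiter.addOperationSize(OperationType.MUTATE, actual); // refine the next estimate
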
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java
new file mode 100644
index 0000000..3c759f0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class QuotaLimiterFactory {
+  public static QuotaLimiter fromThrottle(final Throttle throttle) {
+    return TimeBasedLimiter.fromThrottle(throttle);
+  }
+
+  public static QuotaLimiter update(final QuotaLimiter a, final QuotaLimiter b) {
+    if (a.getClass().equals(b.getClass()) && a instanceof TimeBasedLimiter) {
+      ((TimeBasedLimiter)a).update(((TimeBasedLimiter)b));
+      return a;
+    }
+    throw new UnsupportedOperationException("TODO not implemented yet");
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
new file mode 100644
index 0000000..3804a6f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * In-Memory state of table or namespace quotas
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class QuotaState {
+  protected long lastUpdate = 0;
+  protected long lastQuery = 0;
+
+  protected QuotaLimiter globalLimiter = NoopQuotaLimiter.get();
+
+  public QuotaState() {
+    this(0);
+  }
+
+  public QuotaState(final long updateTs) {
+    lastUpdate = updateTs;
+  }
+
+  public synchronized long getLastUpdate() {
+    return lastUpdate;
+  }
+
+  public synchronized long getLastQuery() {
+    return lastQuery;
+  }
+
+  @Override
+  public synchronized String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("QuotaState(ts=" + getLastUpdate());
+    if (isBypass()) {
+      builder.append(" bypass");
+    } else {
+      if (globalLimiter != NoopQuotaLimiter.get()) {
+        builder.append(" " + globalLimiter);
+      }
+    }
+    builder.append(')');
+    return builder.toString();
+  }
+
+  /**
+   * @return true if there is no quota information associated with this object
+   */
+  public synchronized boolean isBypass() {
+    return globalLimiter == NoopQuotaLimiter.get();
+  }
+
+  /**
+   * Setup the global quota information.
+   * (This operation is part of the QuotaState setup)
+   */
+  public void setQuotas(final Quotas quotas) {
+    if (quotas.hasThrottle()) {
+      globalLimiter = QuotaLimiterFactory.fromThrottle(quotas.getThrottle());
+    } else {
+      globalLimiter = NoopQuotaLimiter.get();
+    }
+  }
+
+  /**
+   * Perform an update of the quota info based on the other quota info object.
+   * (This operation is executed by the QuotaCache)
+   */
+  public synchronized void update(final QuotaState other) {
+    if (globalLimiter == NoopQuotaLimiter.get()) {
+      globalLimiter = other.globalLimiter;
+    } else if (other.globalLimiter == NoopQuotaLimiter.get()) {
+      globalLimiter = NoopQuotaLimiter.get();
+    } else {
+      globalLimiter = QuotaLimiterFactory.update(globalLimiter, other.globalLimiter);
+    }
+    lastUpdate = other.lastUpdate;
+  }
+
+  /**
+   * Return the limiter associated with this quota.
+   * @return the quota limiter
+   */
+  public synchronized QuotaLimiter getGlobalLimiter() {
+    lastQuery = EnvironmentEdgeManager.currentTime();
+    return globalLimiter;
+  }
+
+  /**
+   * Return the limiter associated with this quota without updating internal last query stats
+   * @return the quota limiter
+   */
+  synchronized QuotaLimiter getGlobalLimiterWithoutUpdatingLastQuery() {
+    return globalLimiter;
+  }
+}
\ No newline at end of file
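
The update() merge rule above reduces to: a noop side yields to the other side,
and two real limiters are merged through QuotaLimiterFactory. A small sketch,
assuming a pre-built Quotas proto ("quotasWithThrottle") carrying a throttle:

    QuotaState cached = new QuotaState();     // starts as bypass (noop limiter)
    QuotaState fetched = new QuotaState(EnvironmentEdgeManager.currentTime());
    fetched.setQuotas(quotasWithThrottle);    // assumed pre-built Quotas proto

    cached.update(fetched);                   // cached side adopts the fetched limiter
    assert !cached.isBypass();

    cached.update(new QuotaState());          // fetched side carries no quota anymore:
    assert cached.isBypass();                 // the cached state falls back to noop
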
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
new file mode 100644
index 0000000..bff648d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -0,0 +1,311 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * Helper class to interact with the quota table
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class QuotaUtil extends QuotaTableUtil {
+  private static final Log LOG = LogFactory.getLog(QuotaUtil.class);
+
+  public static final String QUOTA_CONF_KEY = "hbase.quota.enabled";
+  private static final boolean QUOTA_ENABLED_DEFAULT = false;
+
+  /** Table descriptor for Quota internal table */
+  public static final HTableDescriptor QUOTA_TABLE_DESC =
+    new HTableDescriptor(QUOTA_TABLE_NAME);
+  static {
+    QUOTA_TABLE_DESC.addFamily(
+      new HColumnDescriptor(QUOTA_FAMILY_INFO)
+        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+        .setBloomFilterType(BloomType.ROW)
+        .setMaxVersions(1)
+    );
+    QUOTA_TABLE_DESC.addFamily(
+      new HColumnDescriptor(QUOTA_FAMILY_USAGE)
+        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+        .setBloomFilterType(BloomType.ROW)
+        .setMaxVersions(1)
+    );
+  }
+
+  /** Returns true if the support for quota is enabled */
+  public static boolean isQuotaEnabled(final Configuration conf) {
+    return conf.getBoolean(QUOTA_CONF_KEY, QUOTA_ENABLED_DEFAULT);
+  }
+
+  /* =========================================================================
+   *  Quota "settings" helpers
+   */
+  public static void addTableQuota(final Connection connection, final TableName table,
+      final Quotas data) throws IOException {
+    addQuotas(connection, getTableRowKey(table), data);
+  }
+
+  public static void deleteTableQuota(final Connection connection, final TableName table)
+      throws IOException {
+    deleteQuotas(connection, getTableRowKey(table));
+  }
+
+  public static void addNamespaceQuota(final Connection connection, final String namespace,
+      final Quotas data) throws IOException {
+    addQuotas(connection, getNamespaceRowKey(namespace), data);
+  }
+
+  public static void deleteNamespaceQuota(final Connection connection, final String namespace)
+      throws IOException {
+    deleteQuotas(connection, getNamespaceRowKey(namespace));
+  }
+
+  public static void addUserQuota(final Connection connection, final String user,
+      final Quotas data) throws IOException {
+    addQuotas(connection, getUserRowKey(user), data);
+  }
+
+  public static void addUserQuota(final Connection connection, final String user,
+      final TableName table, final Quotas data) throws IOException {
+    addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table), data);
+  }
+
+  public static void addUserQuota(final Connection connection, final String user,
+      final String namespace, final Quotas data) throws IOException {
+    addQuotas(connection, getUserRowKey(user),
+        getSettingsQualifierForUserNamespace(namespace), data);
+  }
+
+  public static void deleteUserQuota(final Connection connection, final String user)
+      throws IOException {
+    deleteQuotas(connection, getUserRowKey(user));
+  }
+
+  public static void deleteUserQuota(final Connection connection, final String user,
+      final TableName table) throws IOException {
+    deleteQuotas(connection, getUserRowKey(user),
+        getSettingsQualifierForUserTable(table));
+  }
+
+  public static void deleteUserQuota(final Connection connection, final String user,
+      final String namespace) throws IOException {
+    deleteQuotas(connection, getUserRowKey(user),
+        getSettingsQualifierForUserNamespace(namespace));
+  }
+
+  private static void addQuotas(final Connection connection, final byte[] rowKey,
+      final Quotas data) throws IOException {
+    addQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS, data);
+  }
+
+  private static void addQuotas(final Connection connection, final byte[] rowKey,
+      final byte[] qualifier, final Quotas data) throws IOException {
+    Put put = new Put(rowKey);
+    put.add(QUOTA_FAMILY_INFO, qualifier, quotasToData(data));
+    doPut(connection, put);
+  }
+
+  private static void deleteQuotas(final Connection connection, final byte[] rowKey)
+      throws IOException {
+    deleteQuotas(connection, rowKey, null);
+  }
+
+  private static void deleteQuotas(final Connection connection, final byte[] rowKey,
+      final byte[] qualifier) throws IOException {
+    Delete delete = new Delete(rowKey);
+    if (qualifier != null) {
+      delete.deleteColumns(QUOTA_FAMILY_INFO, qualifier);
+    }
+    doDelete(connection, delete);
+  }
+
+  public static Map<String, UserQuotaState> fetchUserQuotas(final Connection connection,
+      final List<Get> gets) throws IOException {
+    long nowTs = EnvironmentEdgeManager.currentTime();
+    Result[] results = doGet(connection, gets);
+
+    Map<String, UserQuotaState> userQuotas =
+        new HashMap<String, UserQuotaState>(results.length);
+    for (int i = 0; i < results.length; ++i) {
+      byte[] key = gets.get(i).getRow();
+      assert isUserRowKey(key);
+      String user = getUserFromRowKey(key);
+
+      final UserQuotaState quotaInfo = new UserQuotaState(nowTs);
+      userQuotas.put(user, quotaInfo);
+
+      if (results[i].isEmpty()) continue;
+      assert Bytes.equals(key, results[i].getRow());
+
+      try {
+        parseUserResult(user, results[i], new UserQuotasVisitor() {
+          @Override
+          public void visitUserQuotas(String userName, String namespace, Quotas quotas) {
+            quotaInfo.setQuotas(namespace, quotas);
+          }
+
+          @Override
+          public void visitUserQuotas(String userName, TableName table, Quotas quotas) {
+            quotaInfo.setQuotas(table, quotas);
+          }
+
+          @Override
+          public void visitUserQuotas(String userName, Quotas quotas) {
+            quotaInfo.setQuotas(quotas);
+          }
+        });
+      } catch (IOException e) {
+        LOG.error("Unable to parse user '" + user + "' quotas", e);
+        userQuotas.remove(user);
+      }
+    }
+    return userQuotas;
+  }
+
+  public static Map<TableName, QuotaState> fetchTableQuotas(final Connection connection,
+      final List<Get> gets) throws IOException {
+    return fetchGlobalQuotas("table", connection, gets, new KeyFromRow<TableName>() {
+      @Override
+      public TableName getKeyFromRow(final byte[] row) {
+        assert isTableRowKey(row);
+        return getTableFromRowKey(row);
+      }
+    });
+  }
+
+  public static Map<String, QuotaState> fetchNamespaceQuotas(final Connection connection,
+      final List<Get> gets) throws IOException {
+    return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow<String>() {
+      @Override
+      public String getKeyFromRow(final byte[] row) {
+        assert isNamespaceRowKey(row);
+        return getNamespaceFromRowKey(row);
+      }
+    });
+  }
+
+  public static <K> Map<K, QuotaState> fetchGlobalQuotas(final String type,
+      final Connection connection, final List<Get> gets, final KeyFromRow<K> kfr)
+  throws IOException {
+    long nowTs = EnvironmentEdgeManager.currentTime();
+    Result[] results = doGet(connection, gets);
+
+    Map<K, QuotaState> globalQuotas = new HashMap<K, QuotaState>(results.length);
+    for (int i = 0; i < results.length; ++i) {
+      byte[] row = gets.get(i).getRow();
+      K key = kfr.getKeyFromRow(row);
+
+      QuotaState quotaInfo = new QuotaState(nowTs);
+      globalQuotas.put(key, quotaInfo);
+
+      if (results[i].isEmpty()) continue;
+      assert Bytes.equals(row, results[i].getRow());
+
+      byte[] data = results[i].getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
+      if (data == null) continue;
+
+      try {
+        Quotas quotas = quotasFromData(data);
+        quotaInfo.setQuotas(quotas);
+      } catch (IOException e) {
+        LOG.error("Unable to parse " + type + " '" + key + "' quotas", e);
+        globalQuotas.remove(key);
+      }
+    }
+    return globalQuotas;
+  }
+
+  private static interface KeyFromRow<T> {
+    T getKeyFromRow(final byte[] row);
+  }
+
+  /* =========================================================================
+   *  HTable helpers
+   */
+  private static void doPut(final Connection connection, final Put put)
+  throws IOException {
+    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
+      table.put(put);
+    }
+  }
+
+  private static void doDelete(final Connection connection, final Delete delete)
+  throws IOException {
+    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
+      table.delete(delete);
+    }
+  }
+
+  /* =========================================================================
+   *  Data Size Helpers
+   */
+  public static long calculateMutationSize(final Mutation mutation) {
+    long size = 0;
+    for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) {
+      for (Cell cell : entry.getValue()) {
+        size += KeyValueUtil.length(cell);
+      }
+    }
+    return size;
+  }
+
+  public static long calculateResultSize(final Result result) {
+    long size = 0;
+    for (Cell cell : result.rawCells()) {
+      size += KeyValueUtil.length(cell);
+    }
+    return size;
+  }
+
+  public static long calculateResultSize(final List<Result> results) {
+    long size = 0;
+    for (Result result: results) {
+      for (Cell cell : result.rawCells()) {
+        size += KeyValueUtil.length(cell);
+      }
+    }
+    return size;
+  }
+}
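
For reference, the helpers above store settings in the hbase:quota table via the
row-key and qualifier helpers inherited from QuotaTableUtil. The layout sketched
below is an assumption drawn from that class:

    //   row "u.<user>"       family "q"  qualifier "s"          user quotas
    //   row "u.<user>"       family "q"  qualifier "s.<table>"  user+table quotas
    //   row "t.<table>"      family "q"  qualifier "s"          table quotas
    //   row "n.<namespace>"  family "q"  qualifier "s"          namespace quotas
    QuotaUtil.addUserQuota(connection, "alice", quotas);        // writes row "u.alice"
    QuotaUtil.addUserQuota(connection, "alice", TableName.valueOf("t1"), quotas);
    QuotaUtil.deleteNamespaceQuota(connection, "default");      // deletes row "n.default"

Here "connection" and "quotas" (a Quotas proto) are assumed to be in scope.
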
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
new file mode 100644
index 0000000..1806cc3
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Simple rate limiter.
+ *
+ * Usage Example:
+ *   RateLimiter limiter = new RateLimiter(); // At this point you have an unlimited resource limiter
+ *   limiter.set(10, TimeUnit.SECONDS);       // set 10 resources/sec
+ *
+ *   long lastTs = 0;             // You need to keep track of the last update timestamp
+ *   while (true) {
+ *     long now = System.currentTimeMillis();
+ *
+ *     // call canExecute before performing a resource-consuming operation
+ *     boolean canExecute = limiter.canExecute(now, lastTs);
+ *     // If there are no available resources, wait until one is available
+ *     if (!canExecute) Thread.sleep(limiter.waitInterval());
+ *     // ...execute the work, consume the resource and update the timestamp...
+ *     limiter.consume();
+ *     lastTs = now;
+ *   }
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RateLimiter {
+  private long tunit = 1000;           // Timeunit factor for translating to ms.
+  private long limit = Long.MAX_VALUE; // The max value available resource units can be refilled to.
+  private long avail = Long.MAX_VALUE; // Currently available resource units
+
+  public RateLimiter() {
+  }
+
+  /**
+   * Set the RateLimiter max available resources and refill period.
+   * @param limit The max value available resource units can be refilled to.
+   * @param timeUnit Timeunit factor for translating to ms.
+   */
+  public void set(final long limit, final TimeUnit timeUnit) {
+    switch (timeUnit) {
+      case NANOSECONDS:
+        throw new RuntimeException("Unsupported NANOSECONDS TimeUnit");
+      case MICROSECONDS:
+        throw new RuntimeException("Unsupported MICROSECONDS TimeUnit");
+      case MILLISECONDS:
+        tunit = 1;
+        break;
+      case SECONDS:
+        tunit = 1000;
+        break;
+      case MINUTES:
+        tunit = 60 * 1000;
+        break;
+      case HOURS:
+        tunit = 60 * 60 * 1000;
+        break;
+      case DAYS:
+        tunit = 24 * 60 * 60 * 1000;
+        break;
+    }
+    this.limit = limit;
+    this.avail = limit;
+  }
+
+  public String toString() {
+    if (limit == Long.MAX_VALUE) {
+      return "RateLimiter(Bypass)";
+    }
+    return "RateLimiter(avail=" + avail + " limit=" + limit + " tunit=" + tunit + ")";
+  }
+
+  /**
+   * Sets the current instance of RateLimiter to new values.
+   *
+   * If the current limit is smaller than the new limit, bump up the available resources.
+   * Otherwise allow clients to use up the previously available resources.
+   */
+  public synchronized void update(final RateLimiter other) {
+    this.tunit = other.tunit;
+    if (this.limit < other.limit) {
+      this.avail += (other.limit - this.limit);
+    }
+    this.limit = other.limit;
+  }
+
+  public synchronized boolean isBypass() {
+    return limit == Long.MAX_VALUE;
+  }
+
+  public synchronized long getLimit() {
+    return limit;
+  }
+
+  public synchronized long getAvailable() {
+    return avail;
+  }
+
+  /**
+   * given the time interval, is there at least one resource available to allow execution?
+   * @param now the current timestamp
+   * @param lastTs the timestamp of the last update
+   * @return true if there is at least one resource available, otherwise false
+   */
+  public boolean canExecute(final long now, final long lastTs) {
+    return canExecute(now, lastTs, 1);
+  }
+
+  /**
+   * given the time interval, are there enough available resources to allow execution?
+   * @param now the current timestamp
+   * @param lastTs the timestamp of the last update
+   * @param amount the number of required resources
+   * @return true if there are enough available resources, otherwise false
+   */
+  public synchronized boolean canExecute(final long now, final long lastTs, final long amount) {
+    return avail >= amount ? true : refill(now, lastTs) >= amount;
+  }
+
+  /**
+   * consume one available unit.
+   */
+  public void consume() {
+    consume(1);
+  }
+
+  /**
+   * consume amount available units.
+   * @param amount the number of units to consume
+   */
+  public synchronized void consume(final long amount) {
+    this.avail -= amount;
+  }
+
+  /**
+   * @return estimate of the ms required to wait before being able to provide 1 resource.
+   */
+  public long waitInterval() {
+    return waitInterval(1);
+  }
+
+  /**
+   * @return estimate of the ms required to wait before being able to provide "amount" resources.
+   */
+  public synchronized long waitInterval(final long amount) {
+    // TODO Handle over quota?
+    return (amount <= avail) ? 0 : ((amount * tunit) / limit) - ((avail * tunit) / limit);
+  }
+
+  /**
+   * Given the specified time interval, refill the available units proportionally
+   * to the elapsed time, up to the configured limit.
+   */
+  private long refill(final long now, final long lastTs) {
+    long delta = (limit * (now - lastTs)) / tunit;
+    if (delta > 0) {
+      avail = Math.min(limit, avail + delta);
+    }
+    return avail;
+  }
+}
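
A worked example of the refill/wait arithmetic (values assumed): with a limit of
100 units per second, tunit is 1000, so an empty bucket asked for 10 units
reports waitInterval(10) = (10*1000)/100 = 100ms, and 50ms of elapsed time
refills (100*50)/1000 = 5 units:

    RateLimiter limiter = new RateLimiter();
    limiter.set(100, TimeUnit.SECONDS);      // 100 units per 1000ms

    long t0 = System.currentTimeMillis();
    limiter.consume(100);                    // drain all available units at t0

    long waitMs = limiter.waitInterval(10);  // 100ms to accumulate 10 units

    boolean ok = limiter.canExecute(t0 + 50, t0, 10);   // false: only 5 units refilled
    // a failed canExecute still credits the refill, so advance lastTs accordingly
    ok = limiter.canExecute(t0 + 100, t0 + 50, 10);     // true: 5 more units -> 10 available
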
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java
new file mode 100644
index 0000000..836025f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Region Server Quota Manager.
+ * It is responsible for providing access to the quota information of each user/table.
+ *
+ * The direct user of this class is the RegionServer that will get and check the
+ * user/table quota for each operation (put, get, scan).
+ * For system tables, and for users/tables without a quota specified, the quota check is a noop.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RegionServerQuotaManager {
+  private static final Log LOG = LogFactory.getLog(RegionServerQuotaManager.class);
+
+  private final RegionServerServices rsServices;
+
+  private QuotaCache quotaCache = null;
+
+  public RegionServerQuotaManager(final RegionServerServices rsServices) {
+    this.rsServices = rsServices;
+  }
+
+  public void start(final RpcScheduler rpcScheduler) throws IOException {
+    if (!QuotaUtil.isQuotaEnabled(rsServices.getConfiguration())) {
+      LOG.info("Quota support disabled");
+      return;
+    }
+
+    LOG.info("Initializing quota support");
+
+    // Initialize quota cache
+    quotaCache = new QuotaCache(rsServices);
+    quotaCache.start();
+  }
+
+  public void stop() {
+    if (isQuotaEnabled()) {
+      quotaCache.stop("shutdown");
+    }
+  }
+
+  public boolean isQuotaEnabled() {
+    return quotaCache != null;
+  }
+
+  @VisibleForTesting
+  QuotaCache getQuotaCache() {
+    return quotaCache;
+  }
+
+  /**
+   * Returns the quota for an operation.
+   *
+   * @param ugi the user that is executing the operation
+   * @param table the table where the operation will be executed
+   * @return the OperationQuota
+   */
+  public OperationQuota getQuota(final UserGroupInformation ugi, final TableName table) {
+    if (isQuotaEnabled() && !table.isSystemTable()) {
+      UserQuotaState userQuotaState = quotaCache.getUserQuotaState(ugi);
+      QuotaLimiter userLimiter = userQuotaState.getTableLimiter(table);
+      boolean useNoop = userLimiter.isBypass();
+      if (userQuotaState.hasBypassGlobals()) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("get quota for ugi=" + ugi + " table=" + table + " userLimiter=" + userLimiter);
+        }
+        if (!useNoop) {
+          return new DefaultOperationQuota(userLimiter);
+        }
+      } else {
+        QuotaLimiter nsLimiter = quotaCache.getNamespaceLimiter(table.getNamespaceAsString());
+        QuotaLimiter tableLimiter = quotaCache.getTableLimiter(table);
+        useNoop &= tableLimiter.isBypass() && nsLimiter.isBypass();
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("get quota for ugi=" + ugi + " table=" + table + " userLimiter=" +
+                    userLimiter + " tableLimiter=" + tableLimiter + " nsLimiter=" + nsLimiter);
+        }
+        if (!useNoop) {
+          return new DefaultOperationQuota(userLimiter, tableLimiter, nsLimiter);
+        }
+      }
+    }
+    return NoopOperationQuota.get();
+  }
+
+  /**
+   * Check the quota for the current (rpc-context) user.
+   * Returns the OperationQuota used to get the available quota and
+   * to report the data/usage of the operation.
+   * @param region the region where the operation will be performed
+   * @param type the operation type
+   * @return the OperationQuota
+   * @throws ThrottlingException if the operation cannot be executed because the quota is exceeded.
+   */
+  public OperationQuota checkQuota(final HRegion region,
+      final OperationQuota.OperationType type) throws IOException, ThrottlingException {
+    switch (type) {
+      case SCAN:   return checkQuota(region, 0, 0, 1);
+      case GET:    return checkQuota(region, 0, 1, 0);
+      case MUTATE: return checkQuota(region, 1, 0, 0);
+    }
+    throw new RuntimeException("Invalid operation type: " + type);
+  }
+
+  /**
+   * Check the quota for the current (rpc-context) user.
+   * Returns the OperationQuota used to get the available quota and
+   * to report the data/usage of the operation.
+   * @param region the region where the operation will be performed
+   * @param actions the "multi" actions to perform
+   * @return the OperationQuota
+   * @throws ThrottlingException if the operation cannot be executed because the quota is exceeded.
+   */
+  public OperationQuota checkQuota(final HRegion region,
+      final List<ClientProtos.Action> actions) throws IOException, ThrottlingException {
+    int numWrites = 0;
+    int numReads = 0;
+    for (final ClientProtos.Action action: actions) {
+      if (action.hasMutation()) {
+        numWrites++;
+      } else if (action.hasGet()) {
+        numReads++;
+      }
+    }
+    return checkQuota(region, numWrites, numReads, 0);
+  }
+
+  /**
+   * Check the quota for the current (rpc-context) user.
+   * Returns the OperationQuota used to get the available quota and
+   * to report the data/usage of the operation.
+   * @param region the region where the operation will be performed
+   * @param numWrites number of writes to perform
+   * @param numReads number of short-reads to perform
+   * @param numScans number of scans to perform
+   * @return the OperationQuota
+   * @throws ThrottlingException if the operation cannot be executed because the quota is exceeded.
+   */
+  private OperationQuota checkQuota(final HRegion region,
+      final int numWrites, final int numReads, final int numScans)
+      throws IOException, ThrottlingException {
+    UserGroupInformation ugi;
+    if (RequestContext.isInRequestContext()) {
+      ugi = RequestContext.getRequestUser().getUGI();
+    } else {
+      ugi = User.getCurrent().getUGI();
+    }
+    TableName table = region.getTableDesc().getTableName();
+
+    OperationQuota quota = getQuota(ugi, table);
+    try {
+      quota.checkQuota(numWrites, numReads, numScans);
+    } catch (ThrottlingException e) {
+      LOG.debug("Throttling exception for user=" + ugi.getUserName() +
+                " table=" + table + " numWrites=" + numWrites +
+                " numReads=" + numReads + " numScans=" + numScans +
+                ": " + e.getMessage());
+      throw e;
+    }
+    return quota;
+  }
+}
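
A hypothetical handler-side sketch of how this manager is meant to be used (the
real wiring for this patch lives in RSRpcServices); "region", "quotaManager" and
"clientGet" are assumed to be in scope:

    OperationQuota quota =
        quotaManager.checkQuota(region, OperationQuota.OperationType.GET);
    try {
      Result r = region.get(clientGet);  // execute the operation
      quota.addGetResult(r);             // report the actual size consumed
    } finally {
      quota.close();                     // settle the estimate
    }
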
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
new file mode 100644
index 0000000..8ca7e6b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
+import org.apache.hadoop.hbase.quotas.OperationQuota.AvgOperationSize;
+import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * Simple time based limiter that checks the quota Throttle
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class TimeBasedLimiter implements QuotaLimiter {
+  private static final Log LOG = LogFactory.getLog(TimeBasedLimiter.class);
+
+  private long writeLastTs = 0;
+  private long readLastTs = 0;
+
+  private RateLimiter reqsLimiter = new RateLimiter();
+  private RateLimiter reqSizeLimiter = new RateLimiter();
+  private RateLimiter writeReqsLimiter = new RateLimiter();
+  private RateLimiter writeSizeLimiter = new RateLimiter();
+  private RateLimiter readReqsLimiter = new RateLimiter();
+  private RateLimiter readSizeLimiter = new RateLimiter();
+  private AvgOperationSize avgOpSize = new AvgOperationSize();
+
+  private TimeBasedLimiter() {
+  }
+
+  static QuotaLimiter fromThrottle(final Throttle throttle) {
+    TimeBasedLimiter limiter = new TimeBasedLimiter();
+    boolean isBypass = true;
+    if (throttle.hasReqNum()) {
+      setFromTimedQuota(limiter.reqsLimiter, throttle.getReqNum());
+      isBypass = false;
+    }
+
+    if (throttle.hasReqSize()) {
+      setFromTimedQuota(limiter.reqSizeLimiter, throttle.getReqSize());
+      isBypass = false;
+    }
+
+    if (throttle.hasWriteNum()) {
+      setFromTimedQuota(limiter.writeReqsLimiter, throttle.getWriteNum());
+      isBypass = false;
+    }
+
+    if (throttle.hasWriteSize()) {
+      setFromTimedQuota(limiter.writeSizeLimiter, throttle.getWriteSize());
+      isBypass = false;
+    }
+
+    if (throttle.hasReadNum()) {
+      setFromTimedQuota(limiter.readReqsLimiter, throttle.getReadNum());
+      isBypass = false;
+    }
+
+    if (throttle.hasReadSize()) {
+      setFromTimedQuota(limiter.readSizeLimiter, throttle.getReadSize());
+      isBypass = false;
+    }
+    return isBypass ? NoopQuotaLimiter.get() : limiter;
+  }
+
+  public void update(final TimeBasedLimiter other) {
+    reqsLimiter.update(other.reqsLimiter);
+    reqSizeLimiter.update(other.reqSizeLimiter);
+    writeReqsLimiter.update(other.writeReqsLimiter);
+    writeSizeLimiter.update(other.writeSizeLimiter);
+    readReqsLimiter.update(other.readReqsLimiter);
+    readSizeLimiter.update(other.readSizeLimiter);
+  }
+
+  private static void setFromTimedQuota(final RateLimiter limiter, final TimedQuota timedQuota) {
+    limiter.set(timedQuota.getSoftLimit(), ProtobufUtil.toTimeUnit(timedQuota.getTimeUnit()));
+  }
+
+  @Override
+  public void checkQuota(long writeSize, long readSize)
+      throws ThrottlingException {
+    long now = EnvironmentEdgeManager.currentTime();
+    long lastTs = Math.max(readLastTs, writeLastTs);
+
+    if (!reqsLimiter.canExecute(now, lastTs)) {
+      ThrottlingException.throwNumRequestsExceeded(reqsLimiter.waitInterval());
+    }
+    if (!reqSizeLimiter.canExecute(now, lastTs, writeSize + readSize)) {
+      ThrottlingException.throwNumRequestsExceeded(reqSizeLimiter.waitInterval(writeSize+readSize));
+    }
+
+    if (writeSize > 0) {
+      if (!writeReqsLimiter.canExecute(now, writeLastTs)) {
+        ThrottlingException.throwNumWriteRequestsExceeded(writeReqsLimiter.waitInterval());
+      }
+      if (!writeSizeLimiter.canExecute(now, writeLastTs, writeSize)) {
+        ThrottlingException.throwWriteSizeExceeded(writeSizeLimiter.waitInterval(writeSize));
+      }
+    }
+
+    if (readSize > 0) {
+      if (!readReqsLimiter.canExecute(now, readLastTs)) {
+        ThrottlingException.throwNumReadRequestsExceeded(readReqsLimiter.waitInterval());
+      }
+      if (!readSizeLimiter.canExecute(now, readLastTs, readSize)) {
+        ThrottlingException.throwReadSizeExceeded(readSizeLimiter.waitInterval(readSize));
+      }
+    }
+  }
+
+  @Override
+  public void grabQuota(long writeSize, long readSize) {
+    assert writeSize != 0 || readSize != 0;
+
+    long now = EnvironmentEdgeManager.currentTime();
+
+    reqsLimiter.consume(1);
+    reqSizeLimiter.consume(writeSize + readSize);
+
+    if (writeSize > 0) {
+      writeReqsLimiter.consume(1);
+      writeSizeLimiter.consume(writeSize);
+      writeLastTs = now;
+    }
+    if (readSize > 0) {
+      readReqsLimiter.consume(1);
+      readSizeLimiter.consume(readSize);
+      readLastTs = now;
+    }
+  }
+
+  @Override
+  public void consumeWrite(final long size) {
+    reqSizeLimiter.consume(size);
+    writeSizeLimiter.consume(size);
+  }
+
+  @Override
+  public void consumeRead(final long size) {
+    reqSizeLimiter.consume(size);
+    readSizeLimiter.consume(size);
+  }
+
+  @Override
+  public boolean isBypass() {
+    return false;
+  }
+
+  @Override
+  public long getWriteAvailable() {
+    return writeSizeLimiter.getAvailable();
+  }
+
+  @Override
+  public long getReadAvailable() {
+    return readSizeLimiter.getAvailable();
+  }
+
+  @Override
+  public void addOperationSize(OperationType type, long size) {
+    avgOpSize.addOperationSize(type, size);
+  }
+
+  @Override
+  public long getAvgOperationSize(OperationType type) {
+    return avgOpSize.getAvgOperationSize(type);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("TimeBasedLimiter(");
+    if (!reqsLimiter.isBypass()) builder.append("reqs=" + reqsLimiter);
+    if (!reqSizeLimiter.isBypass()) builder.append(" resSize=" + reqSizeLimiter);
+    if (!writeReqsLimiter.isBypass()) builder.append(" writeReqs=" + writeReqsLimiter);
+    if (!writeSizeLimiter.isBypass()) builder.append(" writeSize=" + writeSizeLimiter);
+    if (!readReqsLimiter.isBypass()) builder.append(" readReqs=" + readReqsLimiter);
+    if (!readSizeLimiter.isBypass()) builder.append(" readSize=" + readSizeLimiter);
+    builder.append(')');
+    return builder.toString();
+  }
+}
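
A sketch of building such a limiter from the generated protobuf types added by
this patch; the builder calls below assume the Quota.proto/HBase.proto
definitions from earlier in the patch (100 req/sec and 1MB/sec on the overall
request limiters):

    QuotaProtos.TimedQuota reqs = QuotaProtos.TimedQuota.newBuilder()
        .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).setSoftLimit(100).build();
    QuotaProtos.TimedQuota size = QuotaProtos.TimedQuota.newBuilder()
        .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).setSoftLimit(1024 * 1024).build();
    QuotaProtos.Throttle throttle = QuotaProtos.Throttle.newBuilder()
        .setReqNum(reqs).setReqSize(size).build();

    QuotaLimiter limiter = QuotaLimiterFactory.fromThrottle(throttle);
    limiter.checkQuota(0, 1024);  // a 1KB read passes while within both quotas
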
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
new file mode 100644
index 0000000..19fce22
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * In-memory state of the user quotas
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class UserQuotaState extends QuotaState {
+  private Map<String, QuotaLimiter> namespaceLimiters = null;
+  private Map<TableName, QuotaLimiter> tableLimiters = null;
+  private boolean bypassGlobals = false;
+
+  public UserQuotaState() {
+    super();
+  }
+
+  public UserQuotaState(final long updateTs) {
+    super(updateTs);
+  }
+
+  @Override
+  public synchronized String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("UserQuotaState(ts=" + getLastUpdate());
+    if (bypassGlobals) builder.append(" bypass-globals");
+
+    if (isBypass()) {
+      builder.append(" bypass");
+    } else {
+      if (getGlobalLimiterWithoutUpdatingLastQuery() != NoopQuotaLimiter.get()) {
+        builder.append(" global-limiter");
+      }
+
+      if (tableLimiters != null && !tableLimiters.isEmpty()) {
+        builder.append(" [");
+        for (TableName table: tableLimiters.keySet()) {
+          builder.append(" " + table);
+        }
+        builder.append(" ]");
+      }
+
+      if (namespaceLimiters != null && !namespaceLimiters.isEmpty()) {
+        builder.append(" [");
+        for (String ns: namespaceLimiters.keySet()) {
+          builder.append(" " + ns);
+        }
+        builder.append(" ]");
+      }
+    }
+    builder.append(')');
+    return builder.toString();
+  }
+
+  /**
+   * @return true if there is no quota information associated to this object
+   */
+  @Override
+  public synchronized boolean isBypass() {
+    return !bypassGlobals &&
+        getGlobalLimiterWithoutUpdatingLastQuery() == NoopQuotaLimiter.get() &&
+        (tableLimiters == null || tableLimiters.isEmpty()) &&
+        (namespaceLimiters == null || namespaceLimiters.isEmpty());
+  }
+
+  public synchronized boolean hasBypassGlobals() {
+    return bypassGlobals;
+  }
+
+  @Override
+  public void setQuotas(final Quotas quotas) {
+    super.setQuotas(quotas);
+    bypassGlobals = quotas.getBypassGlobals();
+  }
+
+  /**
+   * Add the quota information of the specified table.
+   * (This operation is part of the QuotaState setup)
+   */
+  public void setQuotas(final TableName table, Quotas quotas) {
+    tableLimiters = setLimiter(tableLimiters, table, quotas);
+  }
+
+  /**
+   * Add the quota information of the specified namespace.
+   * (This operation is part of the QuotaState setup)
+   */
+  public void setQuotas(final String namespace, Quotas quotas) {
+    namespaceLimiters = setLimiter(namespaceLimiters, namespace, quotas);
+  }
+
+  private <K> Map<K, QuotaLimiter> setLimiter(Map<K, QuotaLimiter> limiters,
+      final K key, final Quotas quotas) {
+    if (limiters == null) {
+      limiters = new HashMap<K, QuotaLimiter>();
+    }
+
+    QuotaLimiter limiter = quotas.hasThrottle() ?
+      QuotaLimiterFactory.fromThrottle(quotas.getThrottle()) : null;
+    if (limiter != null && !limiter.isBypass()) {
+      limiters.put(key, limiter);
+    } else {
+      limiters.remove(key);
+    }
+    return limiters;
+  }
+
+  /**
+   * Perform an update of the quota state based on the other quota state object.
+   * (This operation is executed by the QuotaCache)
+   */
+  @Override
+  public synchronized void update(final QuotaState other) {
+    super.update(other);
+
+    if (other instanceof UserQuotaState) {
+      UserQuotaState uOther = (UserQuotaState)other;
+      tableLimiters = updateLimiters(tableLimiters, uOther.tableLimiters);
+      namespaceLimiters = updateLimiters(namespaceLimiters, uOther.namespaceLimiters);
+      bypassGlobals = uOther.bypassGlobals;
+    } else {
+      tableLimiters = null;
+      namespaceLimiters = null;
+      bypassGlobals = false;
+    }
+  }
+
+  private static <K> Map<K, QuotaLimiter> updateLimiters(final Map<K, QuotaLimiter> map,
+      final Map<K, QuotaLimiter> otherMap) {
+    if (map == null) {
+      return otherMap;
+    }
+
+    if (otherMap != null) {
+      // To Remove
+      Set<K> toRemove = new HashSet<K>(map.keySet());
+      toRemove.removeAll(otherMap.keySet());
+      map.keySet().removeAll(toRemove);
+
+      // To Update/Add
+      for (final Map.Entry<K, QuotaLimiter> entry: otherMap.entrySet()) {
+        QuotaLimiter limiter = map.get(entry.getKey());
+        if (limiter == null) {
+          limiter = entry.getValue();
+        } else {
+          limiter = QuotaLimiterFactory.update(limiter, entry.getValue());
+        }
+        map.put(entry.getKey(), limiter);
+      }
+      return map;
+    }
+    return null;
+  }
+
+  /**
+   * Return the limiter for the specified table associated with this quota.
+   * If the table has no limiter of its own, the namespace limiter (if any) is used,
+   * and finally the global one; with no quota at all a noop limiter is returned.
+   *
+   * @return the quota limiter for the specified table
+   */
+  public synchronized QuotaLimiter getTableLimiter(final TableName table) {
+    lastQuery = EnvironmentEdgeManager.currentTime();
+    if (tableLimiters != null) {
+      QuotaLimiter limiter = tableLimiters.get(table);
+      if (limiter != null) return limiter;
+    }
+    if (namespaceLimiters != null) {
+      QuotaLimiter limiter = namespaceLimiters.get(table.getNamespaceAsString());
+      if (limiter != null) return limiter;
+    }
+    return getGlobalLimiterWithoutUpdatingLastQuery();
+  }
+}
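
The fallback chain in getTableLimiter() (table limiter, then namespace limiter, then global) is easiest to see with a state populated by hand. A small sketch, assuming the Quotas protobuf is built the same way the tests in this patch build it; the class name and table names are made up:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
    import org.apache.hadoop.hbase.quotas.QuotaScope;
    import org.apache.hadoop.hbase.quotas.UserQuotaState;

    public class QuotaResolutionSketch {
      public static void main(String[] args) {
        UserQuotaState state = new UserQuotaState();
        Quotas tableQuota = Quotas.newBuilder()
            .setThrottle(Throttle.newBuilder()
                .setReqNum(ProtobufUtil.toTimedQuota(10, TimeUnit.MINUTES, QuotaScope.MACHINE))
                .build())
            .build();
        state.setQuotas(TableName.valueOf("ns1:t1"), tableQuota);
        // ns1:t1 hits its table-specific limiter; ns1:other has none, and with
        // no namespace limiter set either, it falls through to the global
        // (noop) limiter.
        System.out.println(state.getTableLimiter(TableName.valueOf("ns1:t1")));
        System.out.println(state.getTableLimiter(TableName.valueOf("ns1:other")));
      }
    }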
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index d5f694a..339551d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -130,6 +130,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
+import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
@@ -421,6 +422,8 @@ public class HRegionServer extends HasThread implements
   private RegionServerCoprocessorHost rsHost;
 
   private RegionServerProcedureManagerHost rspmHost;
+  
+  private RegionServerQuotaManager rsQuotaManager;
 
   // Table level lock manager for locking for region operations
   protected TableLockManager tableLockManager;
@@ -806,6 +809,9 @@ public class HRegionServer extends HasThread implements
       nonceManagerChore = this.nonceManager.createCleanupScheduledChore(this);
     }
 
+    // Setup the Quota Manager
+    rsQuotaManager = new RegionServerQuotaManager(this);
+    
     // Setup RPC client for master communication
     rpcClient = RpcClientFactory.createClient(conf, clusterId, new InetSocketAddress(
         rpcServices.isa.getAddress(), 0));
@@ -872,6 +878,9 @@ public class HRegionServer extends HasThread implements
         // since the server is ready to run
         rspmHost.start();
       }
+      
+      // Start the Quota Manager
+      rsQuotaManager.start(getRpcServer().getScheduler());
 
       // We registered with the Master.  Go into run mode.
       long lastMsg = System.currentTimeMillis();
@@ -957,6 +966,11 @@ public class HRegionServer extends HasThread implements
     if (this.nonceManagerChore != null) this.nonceManagerChore.cancel(true);
     if (this.storefileRefresher != null) this.storefileRefresher.cancel(true);
 
+    // Stop the quota manager
+    if (rsQuotaManager != null) {
+      rsQuotaManager.stop();
+    }
+    
     // Stop the snapshot and other procedure handlers, forcefully killing all running tasks
     if (rspmHost != null) {
       rspmHost.stop(this.abortRequested || this.killed);
@@ -2461,6 +2475,11 @@ public class HRegionServer extends HasThread implements
   public ChoreService getChoreService() {
     return choreService;
   }
+  
+  @Override
+  public RegionServerQuotaManager getRegionServerQuotaManager() {
+    return rsQuotaManager;
+  }
 
   //
   // Main program and support routines
@@ -2579,6 +2598,22 @@ public class HRegionServer extends HasThread implements
      }
      return tableRegions;
    }
+  
+  /**
+   * Gets the online tables in this RS.
+   * This method looks at the in-memory onlineRegions.
+   * @return all the online tables in this RS
+   */
+  @Override
+  public Set<TableName> getOnlineTables() {
+    Set<TableName> tables = new HashSet<TableName>();
+    synchronized (this.onlineRegions) {
+      for (HRegion region: this.onlineRegions.values()) {
+        tables.add(region.getTableDesc().getTableName());
+      }
+    }
+    return tables;
+  }
 
   // used by org/apache/hbase/tmpl/regionserver/RSStatusTmpl.jamon (HBASE-4070).
   public String[] getRegionServerCoprocessors() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index a48d055..2897cf4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -150,6 +150,8 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
+import org.apache.hadoop.hbase.quotas.OperationQuota;
+import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
@@ -446,10 +448,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * bypassed as indicated by RegionObserver, null otherwise
    * @throws IOException
    */
-  private Result append(final HRegion region, final MutationProto m,
+  private Result append(final HRegion region, final OperationQuota quota, final MutationProto m,
       final CellScanner cellScanner, long nonceGroup) throws IOException {
     long before = EnvironmentEdgeManager.currentTime();
     Append append = ProtobufUtil.toAppend(m, cellScanner);
+    quota.addMutation(append);
     Result r = null;
     if (region.getCoprocessorHost() != null) {
       r = region.getCoprocessorHost().preAppend(append);
@@ -482,10 +485,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * @return the Result
    * @throws IOException
    */
-  private Result increment(final HRegion region, final MutationProto mutation,
-      final CellScanner cells, long nonceGroup) throws IOException {
+  private Result increment(final HRegion region, final OperationQuota quota,
+      final MutationProto mutation, final CellScanner cells, long nonceGroup) throws IOException {
     long before = EnvironmentEdgeManager.currentTime();
     Increment increment = ProtobufUtil.toIncrement(mutation, cells);
+    quota.addMutation(increment);
     Result r = null;
     if (region.getCoprocessorHost() != null) {
       r = region.getCoprocessorHost().preIncrement(increment);
@@ -522,7 +526,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * @return Return the cellScanner passed
    */
   private List<CellScannable> doNonAtomicRegionMutation(final HRegion region,
-      final RegionAction actions, final CellScanner cellScanner,
+      final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner,
       final RegionActionResult.Builder builder, List<CellScannable> cellsToReturn, long nonceGroup) {
     // Gather up CONTIGUOUS Puts and Deletes in this mutations List.  Idea is that rather than do
     // one at a time, we instead pass them in batch.  Be aware that the corresponding
@@ -555,15 +559,15 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           if (type != MutationType.PUT && type != MutationType.DELETE && mutations != null &&
               !mutations.isEmpty()) {
             // Flush out any Puts or Deletes already collected.
-            doBatchOp(builder, region, mutations, cellScanner);
+            doBatchOp(builder, region, quota, mutations, cellScanner);
             mutations.clear();
           }
           switch (type) {
           case APPEND:
-            r = append(region, action.getMutation(), cellScanner, nonceGroup);
+            r = append(region, quota, action.getMutation(), cellScanner, nonceGroup);
             break;
           case INCREMENT:
-            r = increment(region, action.getMutation(), cellScanner,  nonceGroup);
+            r = increment(region, quota, action.getMutation(), cellScanner,  nonceGroup);
             break;
           case PUT:
           case DELETE:
@@ -608,7 +612,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
     // Finish up any outstanding mutations
     if (mutations != null && !mutations.isEmpty()) {
-      doBatchOp(builder, region, mutations, cellScanner);
+      doBatchOp(builder, region, quota, mutations, cellScanner);
     }
     return cellsToReturn;
   }
@@ -620,8 +624,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * @param region
    * @param mutations
    */
-  private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region,
-      final List<ClientProtos.Action> mutations, final CellScanner cells) {
+  private void
+      doBatchOp(final RegionActionResult.Builder builder, final HRegion region,
+          final OperationQuota quota, final List<ClientProtos.Action> mutations,
+          final CellScanner cells) {
     Mutation[] mArray = new Mutation[mutations.size()];
     long before = EnvironmentEdgeManager.currentTime();
     boolean batchContainsPuts = false, batchContainsDelete = false;
@@ -638,6 +644,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           batchContainsDelete = true;
         }
         mArray[i++] = mutation;
+        quota.addMutation(mutation);
       }
 
       if (!region.getRegionInfo().isMetaTable()) {
@@ -882,6 +889,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   Configuration getConfiguration() {
     return regionServer.getConfiguration();
   }
+  
+  private RegionServerQuotaManager getQuotaManager() {
+    return regionServer.getRegionServerQuotaManager();
+  }
 
   void start() {
     rpcServer.start();
@@ -1742,6 +1753,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   public GetResponse get(final RpcController controller,
       final GetRequest request) throws ServiceException {
     long before = EnvironmentEdgeManager.currentTime();
+    OperationQuota quota = null;
     try {
       checkOpen();
       requestCount.increment();
@@ -1751,6 +1763,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       ClientProtos.Get get = request.getGet();
       Boolean existence = null;
       Result r = null;
+      quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.GET);
 
       if (get.hasClosestRowBefore() && get.getClosestRowBefore()) {
         if (get.getColumnCount() != 1) {
@@ -1785,6 +1798,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         ClientProtos.Result pbr = ProtobufUtil.toResult(r);
         builder.setResult(pbr);
       }
+      if (r != null) {
+        quota.addGetResult(r);
+      }
       return builder.build();
     } catch (IOException ie) {
       throw new ServiceException(ie);
@@ -1793,6 +1809,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         regionServer.metricsRegionServer.updateGet(
           EnvironmentEdgeManager.currentTime() - before);
       }
+      if (quota != null) {
+        quota.close();
+      }
     }
   }
 
@@ -1828,10 +1847,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
     for (RegionAction regionAction : request.getRegionActionList()) {
       this.requestCount.add(regionAction.getActionCount());
+      OperationQuota quota;
       HRegion region;
       regionActionResultBuilder.clear();
       try {
         region = getRegion(regionAction.getRegion());
+        quota = getQuotaManager().checkQuota(region, regionAction.getActionList());
       } catch (IOException e) {
         regionActionResultBuilder.setException(ResponseConverter.buildException(e));
         responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
@@ -1868,10 +1889,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         }
       } else {
         // doNonAtomicRegionMutation manages the exception internally
-        cellsToReturn = doNonAtomicRegionMutation(region, regionAction, cellScanner,
+        cellsToReturn = doNonAtomicRegionMutation(region, quota, regionAction, cellScanner,
             regionActionResultBuilder, cellsToReturn, nonceGroup);
       }
       responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
+      quota.close();
     }
     // Load the controller with the Cells to return.
     if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) {
@@ -1895,6 +1917,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     // It is also the conduit via which we pass back data.
     PayloadCarryingRpcController controller = (PayloadCarryingRpcController)rpcc;
     CellScanner cellScanner = controller != null? controller.cellScanner(): null;
+    OperationQuota quota = null;
     // Clear scanner so we are not holding on to reference across call.
     if (controller != null) controller.setCellScanner(null);
     try {
@@ -1911,17 +1934,20 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       Result r = null;
       Boolean processed = null;
       MutationType type = mutation.getMutateType();
+      long mutationSize = 0;
+      quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
       switch (type) {
       case APPEND:
         // TODO: this doesn't actually check anything.
-        r = append(region, mutation, cellScanner, nonceGroup);
+        r = append(region, quota, mutation, cellScanner, nonceGroup);
         break;
       case INCREMENT:
         // TODO: this doesn't actually check anything.
-        r = increment(region, mutation, cellScanner, nonceGroup);
+        r = increment(region, quota, mutation, cellScanner, nonceGroup);
         break;
       case PUT:
         Put put = ProtobufUtil.toPut(mutation, cellScanner);
+        quota.addMutation(put);
         if (request.hasCondition()) {
           Condition condition = request.getCondition();
           byte[] row = condition.getRow().toByteArray();
@@ -1950,6 +1976,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         break;
       case DELETE:
         Delete delete = ProtobufUtil.toDelete(mutation, cellScanner);
+        quota.addMutation(delete);
         if (request.hasCondition()) {
           Condition condition = request.getCondition();
           byte[] row = condition.getRow().toByteArray();
@@ -1986,6 +2013,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     } catch (IOException ie) {
       regionServer.checkFileSystem();
       throw new ServiceException(ie);
+    } finally {
+      if (quota != null) {
+        quota.close();
+      }
     }
   }
 
@@ -1999,6 +2030,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   @Override
   public ScanResponse scan(final RpcController controller, final ScanRequest request)
   throws ServiceException {
+    OperationQuota quota = null;
     Leases.Lease lease = null;
     String scannerName = null;
     try {
@@ -2086,6 +2118,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         ttl = this.scannerLeaseTimeoutPeriod;
       }
 
+      quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
+      long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
       if (rows > 0) {
         // if nextCallSeq does not match throw Exception straight away. This needs to be
         // performed even before checking of Lease.
@@ -2131,9 +2165,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           }
 
           if (!done) {
-            long maxResultSize = scanner.getMaxResultSize();
+            long maxResultSize = Math.min(scanner.getMaxResultSize(), maxQuotaResultSize);
             if (maxResultSize <= 0) {
-              maxResultSize = maxScannerResultSize;
+              maxResultSize = maxQuotaResultSize;
             }
            List<Result> values = new ArrayList<Result>();
             region.startRegionOperation(Operation.SCAN);
@@ -2211,6 +2245,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
               region.getCoprocessorHost().postScannerNext(scanner, results, rows, true);
             }
           }
+          
+          quota.addScanResult(results);
 
           // If the scanner's filter - if any - is done with the scan
           // and wants to tell the client to stop the scan. This is done by passing
@@ -2271,6 +2307,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         }
       }
       throw new ServiceException(ie);
+    } finally {
+      if (quota != null) {
+        quota.close();
+      }
     }
   }
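
Each RPC touched above follows the same shape: check the quota before doing any work, charge the real result size once it is known, and always close the OperationQuota in a finally block so the average-operation-size statistics stay current. A condensed sketch of that pattern; throttledGet and doGet are illustrative stand-ins, not methods from this patch:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.quotas.OperationQuota;
    import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class RpcQuotaPatternSketch {
      static Result throttledGet(RegionServerQuotaManager quotaManager, HRegion region)
          throws IOException {
        OperationQuota quota = null;
        try {
          // Throws ThrottlingException (an IOException) if the caller is over quota.
          quota = quotaManager.checkQuota(region, OperationQuota.OperationType.GET);
          Result r = doGet(region);      // stand-in for the actual read
          if (r != null) {
            quota.addGetResult(r);       // charge the real payload size
          }
          return r;
        } finally {
          if (quota != null) {
            quota.close();               // settle the avg-operation-size stats
          }
        }
      }

      private static Result doGet(HRegion region) { return null; } // placeholder
    }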
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index cf95528..c914082 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -20,15 +20,18 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.zookeeper.KeeperException;
 
@@ -68,6 +71,11 @@ public interface RegionServerServices
    * @return RegionServer's instance of {@link TableLockManager}
    */
   TableLockManager getTableLockManager();
+  
+  /**
+   * @return RegionServer's instance of {@link RegionServerQuotaManager}
+   */
+  RegionServerQuotaManager getRegionServerQuotaManager();
 
   /**
    * Tasks to perform after region open to complete deploy of region on
@@ -147,4 +155,9 @@ public interface RegionServerServices
    * @see org.apache.hadoop.hbase.regionserver.Store#getCompactionPressure()
    */
   double getCompactionPressure();
+  
+  /**
+   * @return all the online tables in this RS
+   */
+  Set<TableName> getOnlineTables();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 1c6ed22..b2eff1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -2383,4 +2384,34 @@ public class AccessController extends BaseMasterAndRegionObserver
   public void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
       List<WALEntry> entries, CellScanner cells) throws IOException {
   }
+  
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final Quotas quotas) throws IOException {
+    requirePermission("setUserQuota", Action.ADMIN);
+  }
+
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final TableName tableName, final Quotas quotas) throws IOException {
+    requirePermission("setUserTableQuota", tableName, null, null, Action.ADMIN);
+  }
+
+  @Override
+  public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String userName, final String namespace, final Quotas quotas) throws IOException {
+    requirePermission("setUserNamespaceQuota", Action.ADMIN);
+  }
+
+  @Override
+  public void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableName tableName, final Quotas quotas) throws IOException {
+    requirePermission("setTableQuota", tableName, null, null, Action.ADMIN);
+  }
+
+  @Override
+  public void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final String namespace, final Quotas quotas) throws IOException {
+    requirePermission("setNamespaceQuota", Action.ADMIN);
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java
index 8d1664b..4a93151 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java
@@ -90,6 +90,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
 
     public E poll() {
       E elem = objects[head];
+      objects[head] = null;
       head = (head + 1) % objects.length;
       if (head == 0) tail = 0;
       return elem;
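
The one-line change above is the classic ring-buffer fix: without clearing the slot, the queue keeps a strong reference to every polled element until the slot happens to be overwritten, which can pin otherwise-dead call objects in memory. A stripped-down illustration (not the real class, which also tracks a tail index, capacity and locking):

    public class RingBufferSketch<E> {
      private final Object[] objects = new Object[16];
      private int head = 0;

      @SuppressWarnings("unchecked")
      public E poll() {
        E elem = (E) objects[head];
        objects[head] = null;  // drop the reference so the element can be GC'd
        head = (head + 1) % objects.length;
        return elem;
      }
    }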
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index aecd525..daf358d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -22,6 +22,7 @@ import java.net.InetSocketAddress;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -100,6 +102,11 @@ public class MockRegionServerServices implements RegionServerServices {
   public List<HRegion> getOnlineRegions(TableName tableName) throws IOException {
     return null;
   }
+  
+  @Override
+  public Set<TableName> getOnlineTables() {
+    return null;
+  }
 
   @Override
   public void addToOnlineRegions(HRegion r) {
@@ -169,6 +176,11 @@ public class MockRegionServerServices implements RegionServerServices {
   public TableLockManager getTableLockManager() {
     return new NullTableLockManager();
   }
+  
+  @Override
+  public RegionServerQuotaManager getRegionServerQuotaManager() {
+    return null;
+  }
 
   @Override
   public ServerName getServerName() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index c85ba83..ba5ca2c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
@@ -1092,6 +1093,56 @@ public class TestMasterObserver {
     public void postTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
         TableName tableName) throws IOException {
     }
+    
+    @Override
+    public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String userName, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String userName, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String userName, final TableName tableName, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String userName, final TableName tableName, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String userName, final String namespace, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void postSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String userName, final String namespace, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final TableName tableName, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void postSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final TableName tableName, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String namespace, final Quotas quotas) throws IOException {
+    }
+
+    @Override
+    public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final String namespace, final Quotas quotas) throws IOException {
+    }
   }
 
   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
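
The stubs above exercise the new quota hooks; a real coprocessor can use the same hooks to audit or veto quota changes. A minimal sketch, assuming BaseMasterObserver's no-op defaults; QuotaAuditObserver is a made-up name:

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;

    public class QuotaAuditObserver extends BaseMasterObserver {
      private static final Log LOG = LogFactory.getLog(QuotaAuditObserver.class);

      @Override
      public void postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String userName, Quotas quotas) throws IOException {
        LOG.info("quota updated for user=" + userName + " quotas=" + quotas);
      }
    }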
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 7b5c494..0dc0cc9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 
@@ -90,6 +91,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -323,6 +325,11 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   public TableLockManager getTableLockManager() {
     return new NullTableLockManager();
   }
+  
+  @Override
+  public RegionServerQuotaManager getRegionServerQuotaManager() {
+    return null;
+  }
 
   @Override
   public void postOpenDeployTasks(HRegion r)
@@ -520,6 +527,11 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
     // TODO Auto-generated method stub
     return null;
   }
+  
+  @Override
+  public Set<TableName> getOnlineTables() {
+    return null;
+  }
 
   @Override
   public Leases getLeases() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index b88c747..4e6b01f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -250,6 +251,11 @@ public class TestCatalogJanitor {
     public MasterCoprocessorHost getMasterCoprocessorHost() {
       return null;
     }
+    
+    @Override
+    public MasterQuotaManager getMasterQuotaManager() {
+      return null;
+    }
 
     @Override
     public ServerManager getServerManager() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
new file mode 100644
index 0000000..8885606
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Minicluster tests that validate that quota entries are properly set in the quota table
+ */
+@Category({MediumTests.class})
+public class TestQuotaAdmin {
+  final Log LOG = LogFactory.getLog(getClass());
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+    TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, 2000);
+    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
+    TEST_UTIL.startMiniCluster(1);
+    TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testSimpleScan() throws Exception {
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    String userName = User.getCurrent().getShortName();
+
+    admin.setQuota(QuotaSettingsFactory
+      .throttleUser(userName, ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, true));
+
+    QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration());
+    try {
+      int countThrottle = 0;
+      int countGlobalBypass = 0;
+      for (QuotaSettings settings: scanner) {
+        LOG.debug(settings);
+        switch (settings.getQuotaType()) {
+          case THROTTLE:
+            ThrottleSettings throttle = (ThrottleSettings)settings;
+            assertEquals(userName, throttle.getUserName());
+            assertEquals(null, throttle.getTableName());
+            assertEquals(null, throttle.getNamespace());
+            assertEquals(6, throttle.getSoftLimit());
+            assertEquals(TimeUnit.MINUTES, throttle.getTimeUnit());
+            countThrottle++;
+            break;
+          case GLOBAL_BYPASS:
+            countGlobalBypass++;
+            break;
+          default:
+            fail("unexpected settings type: " + settings.getQuotaType());
+        }
+      }
+      assertEquals(1, countThrottle);
+      assertEquals(1, countGlobalBypass);
+    } finally {
+      scanner.close();
+    }
+
+    admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName));
+    assertNumResults(1, null);
+    admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, false));
+    assertNumResults(0, null);
+  }
+
+  @Test
+  public void testQuotaRetrieverFilter() throws Exception {
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    TableName[] tables = new TableName[] {
+      TableName.valueOf("T0"), TableName.valueOf("T01"), TableName.valueOf("NS0:T2"),
+    };
+    String[] namespaces = new String[] { "NS0", "NS01", "NS2" };
+    String[] users = new String[] { "User0", "User01", "User2" };
+
+    for (String user: users) {
+      admin.setQuota(QuotaSettingsFactory
+        .throttleUser(user, ThrottleType.REQUEST_NUMBER, 1, TimeUnit.MINUTES));
+
+      for (TableName table: tables) {
+        admin.setQuota(QuotaSettingsFactory
+          .throttleUser(user, table, ThrottleType.REQUEST_NUMBER, 2, TimeUnit.MINUTES));
+      }
+
+      for (String ns: namespaces) {
+        admin.setQuota(QuotaSettingsFactory
+          .throttleUser(user, ns, ThrottleType.REQUEST_NUMBER, 3, TimeUnit.MINUTES));
+      }
+    }
+    assertNumResults(21, null);
+
+    for (TableName table: tables) {
+      admin.setQuota(QuotaSettingsFactory
+        .throttleTable(table, ThrottleType.REQUEST_NUMBER, 4, TimeUnit.MINUTES));
+    }
+    assertNumResults(24, null);
+
+    for (String ns: namespaces) {
+      admin.setQuota(QuotaSettingsFactory
+        .throttleNamespace(ns, ThrottleType.REQUEST_NUMBER, 5, TimeUnit.MINUTES));
+    }
+    assertNumResults(27, null);
+
+    assertNumResults(7, new QuotaFilter().setUserFilter("User0"));
+    assertNumResults(0, new QuotaFilter().setUserFilter("User"));
+    assertNumResults(21, new QuotaFilter().setUserFilter("User.*"));
+    assertNumResults(3, new QuotaFilter().setUserFilter("User.*").setTableFilter("T0"));
+    assertNumResults(3, new QuotaFilter().setUserFilter("User.*").setTableFilter("NS.*"));
+    assertNumResults(0, new QuotaFilter().setUserFilter("User.*").setTableFilter("T"));
+    assertNumResults(6, new QuotaFilter().setUserFilter("User.*").setTableFilter("T.*"));
+    assertNumResults(3, new QuotaFilter().setUserFilter("User.*").setNamespaceFilter("NS0"));
+    assertNumResults(0, new QuotaFilter().setUserFilter("User.*").setNamespaceFilter("NS"));
+    assertNumResults(9, new QuotaFilter().setUserFilter("User.*").setNamespaceFilter("NS.*"));
+    assertNumResults(6, new QuotaFilter().setUserFilter("User.*")
+                                            .setTableFilter("T0").setNamespaceFilter("NS0"));
+    assertNumResults(1, new QuotaFilter().setTableFilter("T0"));
+    assertNumResults(0, new QuotaFilter().setTableFilter("T"));
+    assertNumResults(2, new QuotaFilter().setTableFilter("T.*"));
+    assertNumResults(3, new QuotaFilter().setTableFilter(".*T.*"));
+    assertNumResults(1, new QuotaFilter().setNamespaceFilter("NS0"));
+    assertNumResults(0, new QuotaFilter().setNamespaceFilter("NS"));
+    assertNumResults(3, new QuotaFilter().setNamespaceFilter("NS.*"));
+
+    for (String user: users) {
+      admin.setQuota(QuotaSettingsFactory.unthrottleUser(user));
+      for (TableName table: tables) {
+        admin.setQuota(QuotaSettingsFactory.unthrottleUser(user, table));
+      }
+      for (String ns: namespaces) {
+        admin.setQuota(QuotaSettingsFactory.unthrottleUser(user, ns));
+      }
+    }
+    assertNumResults(6, null);
+
+    for (TableName table: tables) {
+      admin.setQuota(QuotaSettingsFactory.unthrottleTable(table));
+    }
+    assertNumResults(3, null);
+
+    for (String ns: namespaces) {
+      admin.setQuota(QuotaSettingsFactory.unthrottleNamespace(ns));
+    }
+    assertNumResults(0, null);
+  }
+
+  private void assertNumResults(int expected, final QuotaFilter filter) throws Exception {
+    assertEquals(expected, countResults(filter));
+  }
+
+  private int countResults(final QuotaFilter filter) throws Exception {
+    QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration(), filter);
+    try {
+      int count = 0;
+      for (QuotaSettings settings: scanner) {
+        LOG.debug(settings);
+        count++;
+      }
+      return count;
+    } finally {
+      scanner.close();
+    }
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
new file mode 100644
index 0000000..b176e34
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({SmallTests.class})
+public class TestQuotaState {
+  private static final TableName UNKNOWN_TABLE_NAME = TableName.valueOf("unknownTable");
+
+  @Test(timeout=60000)
+  public void testQuotaStateBypass() {
+    QuotaState quotaInfo = new QuotaState();
+    assertTrue(quotaInfo.isBypass());
+    assertNoopLimiter(quotaInfo.getGlobalLimiter());
+
+    UserQuotaState userQuotaState = new UserQuotaState();
+    assertTrue(userQuotaState.isBypass());
+    assertNoopLimiter(userQuotaState.getTableLimiter(UNKNOWN_TABLE_NAME));
+  }
+
+  @Test(timeout=60000)
+  public void testSimpleQuotaStateOperation() {
+    final TableName table = TableName.valueOf("testSimpleQuotaStateOperationTable");
+    final int NUM_GLOBAL_THROTTLE = 3;
+    final int NUM_TABLE_THROTTLE = 2;
+
+    UserQuotaState quotaInfo = new UserQuotaState();
+    assertTrue(quotaInfo.isBypass());
+
+    // Set global quota
+    quotaInfo.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE));
+    assertFalse(quotaInfo.isBypass());
+
+    // Set table quota
+    quotaInfo.setQuotas(table, buildReqNumThrottle(NUM_TABLE_THROTTLE));
+    assertFalse(quotaInfo.isBypass());
+    assertTrue(quotaInfo.getGlobalLimiter() == quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME));
+    assertThrottleException(quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME), NUM_GLOBAL_THROTTLE);
+    assertThrottleException(quotaInfo.getTableLimiter(table), NUM_TABLE_THROTTLE);
+  }
+
+  @Test(timeout=60000)
+  public void testQuotaStateUpdateBypassThrottle() {
+    final long LAST_UPDATE = 10;
+
+    UserQuotaState quotaInfo = new UserQuotaState();
+    assertEquals(0, quotaInfo.getLastUpdate());
+    assertTrue(quotaInfo.isBypass());
+
+    UserQuotaState otherQuotaState = new UserQuotaState(LAST_UPDATE);
+    assertEquals(LAST_UPDATE, otherQuotaState.getLastUpdate());
+    assertTrue(otherQuotaState.isBypass());
+
+    quotaInfo.update(otherQuotaState);
+    assertEquals(LAST_UPDATE, quotaInfo.getLastUpdate());
+    assertTrue(quotaInfo.isBypass());
+    assertTrue(quotaInfo.getGlobalLimiter() == quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME));
+    assertNoopLimiter(quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME));
+  }
+
+  @Test(timeout=60000)
+  public void testQuotaStateUpdateGlobalThrottle() {
+    final int NUM_GLOBAL_THROTTLE_1 = 3;
+    final int NUM_GLOBAL_THROTTLE_2 = 11;
+    final long LAST_UPDATE_1 = 10;
+    final long LAST_UPDATE_2 = 20;
+    final long LAST_UPDATE_3 = 30;
+
+    QuotaState quotaInfo = new QuotaState();
+    assertEquals(0, quotaInfo.getLastUpdate());
+    assertTrue(quotaInfo.isBypass());
+
+    // Add global throttle
+    QuotaState otherQuotaState = new QuotaState(LAST_UPDATE_1);
+    otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_1));
+    assertEquals(LAST_UPDATE_1, otherQuotaState.getLastUpdate());
+    assertFalse(otherQuotaState.isBypass());
+
+    quotaInfo.update(otherQuotaState);
+    assertEquals(LAST_UPDATE_1, quotaInfo.getLastUpdate());
+    assertFalse(quotaInfo.isBypass());
+    assertThrottleException(quotaInfo.getGlobalLimiter(), NUM_GLOBAL_THROTTLE_1);
+
+    // Update the global throttle; already-consumed quota carries over to the new limit
+    otherQuotaState = new QuotaState(LAST_UPDATE_2);
+    otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_2));
+    assertEquals(LAST_UPDATE_2, otherQuotaState.getLastUpdate());
+    assertFalse(otherQuotaState.isBypass());
+
+    quotaInfo.update(otherQuotaState);
+    assertEquals(LAST_UPDATE_2, quotaInfo.getLastUpdate());
+    assertFalse(quotaInfo.isBypass());
+    assertThrottleException(quotaInfo.getGlobalLimiter(),
+        NUM_GLOBAL_THROTTLE_2 - NUM_GLOBAL_THROTTLE_1);
+
+    // Remove global throttle
+    otherQuotaState = new QuotaState(LAST_UPDATE_3);
+    assertEquals(LAST_UPDATE_3, otherQuotaState.getLastUpdate());
+    assertTrue(otherQuotaState.isBypass());
+
+    quotaInfo.update(otherQuotaState);
+    assertEquals(LAST_UPDATE_3, quotaInfo.getLastUpdate());
+    assertTrue(quotaInfo.isBypass());
+    assertNoopLimiter(quotaInfo.getGlobalLimiter());
+  }
+
+  @Test(timeout=60000)
+  public void testQuotaStateUpdateTableThrottle() {
+    final TableName TABLE_A = TableName.valueOf("TableA");
+    final TableName TABLE_B = TableName.valueOf("TableB");
+    final TableName TABLE_C = TableName.valueOf("TableC");
+    final int TABLE_A_THROTTLE_1 = 3;
+    final int TABLE_A_THROTTLE_2 = 11;
+    final int TABLE_B_THROTTLE = 4;
+    final int TABLE_C_THROTTLE = 5;
+    final long LAST_UPDATE_1 = 10;
+    final long LAST_UPDATE_2 = 20;
+    final long LAST_UPDATE_3 = 30;
+
+    UserQuotaState quotaInfo = new UserQuotaState();
+    assertEquals(0, quotaInfo.getLastUpdate());
+    assertTrue(quotaInfo.isBypass());
+
+    // Add A B table limiters
+    UserQuotaState otherQuotaState = new UserQuotaState(LAST_UPDATE_1);
+    otherQuotaState.setQuotas(TABLE_A, buildReqNumThrottle(TABLE_A_THROTTLE_1));
+    otherQuotaState.setQuotas(TABLE_B, buildReqNumThrottle(TABLE_B_THROTTLE));
+    assertEquals(LAST_UPDATE_1, otherQuotaState.getLastUpdate());
+    assertFalse(otherQuotaState.isBypass());
+
+    quotaInfo.update(otherQuotaState);
+    assertEquals(LAST_UPDATE_1, quotaInfo.getLastUpdate());
+    assertFalse(quotaInfo.isBypass());
+    assertThrottleException(quotaInfo.getTableLimiter(TABLE_A), TABLE_A_THROTTLE_1);
+    assertThrottleException(quotaInfo.getTableLimiter(TABLE_B), TABLE_B_THROTTLE);
+    assertNoopLimiter(quotaInfo.getTableLimiter(TABLE_C));
+
+    // Add C, Remove B, Update A table limiters
+    otherQuotaState = new UserQuotaState(LAST_UPDATE_2);
+    otherQuotaState.setQuotas(TABLE_A, buildReqNumThrottle(TABLE_A_THROTTLE_2));
+    otherQuotaState.setQuotas(TABLE_C, buildReqNumThrottle(TABLE_C_THROTTLE));
+    assertEquals(LAST_UPDATE_2, otherQuotaState.getLastUpdate());
+    assertFalse(otherQuotaState.isBypass());
+
+    quotaInfo.update(otherQuotaState);
+    assertEquals(LAST_UPDATE_2, quotaInfo.getLastUpdate());
+    assertFalse(quotaInfo.isBypass());
+    assertThrottleException(quotaInfo.getTableLimiter(TABLE_A),
+        TABLE_A_THROTTLE_2 - TABLE_A_THROTTLE_1);
+    assertThrottleException(quotaInfo.getTableLimiter(TABLE_C), TABLE_C_THROTTLE);
+    assertNoopLimiter(quotaInfo.getTableLimiter(TABLE_B));
+
+    // Remove table limiters
+    otherQuotaState = new UserQuotaState(LAST_UPDATE_3);
+    assertEquals(LAST_UPDATE_3, otherQuotaState.getLastUpdate());
+    assertTrue(otherQuotaState.isBypass());
+
+    quotaInfo.update(otherQuotaState);
+    assertEquals(LAST_UPDATE_3, quotaInfo.getLastUpdate());
+    assertTrue(quotaInfo.isBypass());
+    assertNoopLimiter(quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME));
+  }
+
+  private Quotas buildReqNumThrottle(final long limit) {
+    return Quotas.newBuilder()
+            .setThrottle(Throttle.newBuilder()
+              .setReqNum(ProtobufUtil.toTimedQuota(limit, TimeUnit.MINUTES, QuotaScope.MACHINE))
+              .build())
+            .build();
+  }
+
+  private void assertThrottleException(final QuotaLimiter limiter, final int availReqs) {
+    assertNoThrottleException(limiter, availReqs);
+    try {
+      limiter.checkQuota(1, 1);
+      fail("Should have thrown ThrottlingException");
+    } catch (ThrottlingException e) {
+      // expected
+    }
+  }
+
+  private void assertNoThrottleException(final QuotaLimiter limiter, final int availReqs) {
+    for (int i = 0; i < availReqs; ++i) {
+      try {
+        limiter.checkQuota(1, 1);
+      } catch (ThrottlingException e) {
+        fail("Unexpected ThrottlingException after " + i + " requests. limit=" + availReqs);
+      }
+      limiter.grabQuota(1, 1);
+    }
+  }
+
+  private void assertNoopLimiter(final QuotaLimiter limiter) {
+    assertTrue(limiter == NoopQuotaLimiter.get());
+    assertNoThrottleException(limiter, 100);
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java
new file mode 100644
index 0000000..b3d4253
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test the quota table helpers (e.g. CRUD operations)
+ */
+@Category({MediumTests.class})
+public class TestQuotaTableUtil {
+  final Log LOG = LogFactory.getLog(getClass());
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private Connection connection;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+    TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, 2000);
+    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
+    TEST_UTIL.startMiniCluster(1);
+    TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void before() throws IOException {
+    this.connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+  }
+
+  @After
+  public void after() throws IOException {
+    this.connection.close();
+  }
+
+  @Test
+  public void testTableQuotaUtil() throws Exception {
+    final TableName table = TableName.valueOf("testTableQuotaUtilTable");
+
+    Quotas quota = Quotas.newBuilder()
+              .setThrottle(Throttle.newBuilder()
+                .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setReadSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .build())
+              .build();
+
+    // Add table quota and verify it
+    QuotaUtil.addTableQuota(this.connection, table, quota);
+    Quotas resQuota = QuotaUtil.getTableQuota(this.connection, table);
+    assertEquals(quota, resQuota);
+
+    // Remove table quota and verify it
+    QuotaUtil.deleteTableQuota(this.connection, table);
+    resQuota = QuotaUtil.getTableQuota(this.connection, table);
+    assertEquals(null, resQuota);
+  }
+
+  @Test
+  public void testNamespaceQuotaUtil() throws Exception {
+    final String namespace = "testNamespaceQuotaUtilNS";
+
+    Quotas quota = Quotas.newBuilder()
+              .setThrottle(Throttle.newBuilder()
+                .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setReadSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .build())
+              .build();
+
+    // Add namespace quota and verify it
+    QuotaUtil.addNamespaceQuota(this.connection, namespace, quota);
+    Quotas resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace);
+    assertEquals(quota, resQuota);
+
+    // Remove namespace quota and verify it
+    QuotaUtil.deleteNamespaceQuota(this.connection, namespace);
+    resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace);
+    assertEquals(null, resQuota);
+  }
+
+  @Test
+  public void testUserQuotaUtil() throws Exception {
+    final TableName table = TableName.valueOf("testUserQuotaUtilTable");
+    final String namespace = "testNS";
+    final String user = "testUser";
+
+    Quotas quotaNamespace = Quotas.newBuilder()
+              .setThrottle(Throttle.newBuilder()
+                .setReqNum(ProtobufUtil.toTimedQuota(50000, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .build())
+              .build();
+    Quotas quotaTable = Quotas.newBuilder()
+              .setThrottle(Throttle.newBuilder()
+                .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setReadSize(ProtobufUtil.toTimedQuota(10000, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .build())
+              .build();
+    Quotas quota = Quotas.newBuilder()
+              .setThrottle(Throttle.newBuilder()
+                .setReqSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setWriteSize(ProtobufUtil.toTimedQuota(4096, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .setReadNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE))
+                .build())
+              .build();
+
+    // Add user global quota
+    QuotaUtil.addUserQuota(this.connection, user, quota);
+    Quotas resQuota = QuotaUtil.getUserQuota(this.connection, user);
+    assertEquals(quota, resQuota);
+
+    // Add user quota for table
+    QuotaUtil.addUserQuota(this.connection, user, table, quotaTable);
+    Quotas resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table);
+    assertEquals(quotaTable, resQuotaTable);
+
+    // Add user quota for namespace
+    QuotaUtil.addUserQuota(this.connection, user, namespace, quotaNamespace);
+    Quotas resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace);
+    assertEquals(quotaNamespace, resQuotaNS);
+
+    // Delete user global quota
+    QuotaUtil.deleteUserQuota(this.connection, user);
+    resQuota = QuotaUtil.getUserQuota(this.connection, user);
+    assertEquals(null, resQuota);
+
+    // Delete user quota for table
+    QuotaUtil.deleteUserQuota(this.connection, user, table);
+    resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table);
+    assertEquals(null, resQuotaTable);
+
+    // Delete user quota for namespace
+    QuotaUtil.deleteUserQuota(this.connection, user, namespace);
+    resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace);
+    assertEquals(null, resQuotaNS);
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
new file mode 100644
index 0000000..6ab2099
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
@@ -0,0 +1,422 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+@Category({MediumTests.class})
+public class TestQuotaThrottle {
+  final Log LOG = LogFactory.getLog(getClass());
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private final static byte[] FAMILY = Bytes.toBytes("cf");
+  private final static byte[] QUALIFIER = Bytes.toBytes("q");
+
+  private final static TableName[] TABLE_NAMES = new TableName[] {
+    TableName.valueOf("TestQuotaAdmin0"),
+    TableName.valueOf("TestQuotaAdmin1"),
+    TableName.valueOf("TestQuotaAdmin2")
+  };
+
+  private static HTable[] tables;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
+    TEST_UTIL.startMiniCluster(1);
+    TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
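+    // Allow the test to force a synchronous quota cache refresh instead of waiting for the periodic chore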
+    QuotaCache.TEST_FORCE_REFRESH = true;
+
+    tables = new HTable[TABLE_NAMES.length];
+    for (int i = 0; i < TABLE_NAMES.length; ++i) {
+      tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
+    }
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    for (int i = 0; i < tables.length; ++i) {
+      if (tables[i] != null) {
+        tables[i].close();
+        TEST_UTIL.deleteTable(TABLE_NAMES[i]);
+      }
+    }
+
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    for (RegionServerThread rst: TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
+      RegionServerQuotaManager quotaManager = rst.getRegionServer().getRegionServerQuotaManager();
+      QuotaCache quotaCache = quotaManager.getQuotaCache();
+      quotaCache.getNamespaceQuotaCache().clear();
+      quotaCache.getTableQuotaCache().clear();
+      quotaCache.getUserQuotaCache().clear();
+    }
+  }
+
+  @Test(timeout=60000)
+  public void testUserGlobalThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
+    final String userName = User.getCurrent().getShortName();
+
+    // Add 6req/min limit
+    admin.setQuota(QuotaSettingsFactory
+      .throttleUser(userName, ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerUserCacheRefresh(false, TABLE_NAMES);
+
+    // should execute at max 6 requests
+    assertEquals(6, doPuts(100, tables));
+
+    // wait a minute and you should get another 6 requests executed
+    waitMinuteQuota();
+    assertEquals(6, doPuts(100, tables));
+
+    // Remove all the limits
+    admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName));
+    triggerUserCacheRefresh(true, TABLE_NAMES);
+    assertEquals(60, doPuts(60, tables));
+    assertEquals(60, doGets(60, tables));
+  }
+
+  @Test(timeout=60000)
+  public void testUserTableThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
+    final String userName = User.getCurrent().getShortName();
+
+    // Add 6req/min limit
+    admin.setQuota(QuotaSettingsFactory
+      .throttleUser(userName, TABLE_NAMES[0], ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerUserCacheRefresh(false, TABLE_NAMES[0]);
+
+    // should execute at max 6 requests on tables[0] and have no limit on tables[1]
+    assertEquals(6, doPuts(100, tables[0]));
+    assertEquals(30, doPuts(30, tables[1]));
+
+    // wait a minute and you should get another 6 requests executed
+    waitMinuteQuota();
+    assertEquals(6, doPuts(100, tables[0]));
+
+    // Remove all the limits
+    admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName, TABLE_NAMES[0]));
+    triggerUserCacheRefresh(true, TABLE_NAMES);
+    assertEquals(60, doPuts(60, tables));
+    assertEquals(60, doGets(60, tables));
+  }
+
+  @Test(timeout=60000)
+  public void testUserNamespaceThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
+    final String userName = User.getCurrent().getShortName();
+    final String NAMESPACE = "default";
+
+    // Add 6req/min limit
+    admin.setQuota(QuotaSettingsFactory
+      .throttleUser(userName, NAMESPACE, ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerUserCacheRefresh(false, TABLE_NAMES[0]);
+
+    // should execute at max 6 requests; the user's namespace limit applies to every table in 'default'
+    assertEquals(6, doPuts(100, tables[0]));
+
+    // wait a minute and you should get another 6 requests executed
+    waitMinuteQuota();
+    assertEquals(6, doPuts(100, tables[1]));
+
+    // Remove all the limits
+    admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName, NAMESPACE));
+    triggerUserCacheRefresh(true, TABLE_NAMES);
+    assertEquals(60, doPuts(60, tables));
+    assertEquals(60, doGets(60, tables));
+  }
+
+  @Test(timeout=60000)
+  public void testTableGlobalThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
+
+    // Add 6req/min limit
+    admin.setQuota(QuotaSettingsFactory
+      .throttleTable(TABLE_NAMES[0], ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerTableCacheRefresh(false, TABLE_NAMES[0]);
+
+    // should execute at max 6 requests
+    assertEquals(6, doPuts(100, tables[0]));
+    // should have no limits
+    assertEquals(30, doPuts(30, tables[1]));
+
+    // wait a minute and you should get another 6 requests executed
+    waitMinuteQuota();
+    assertEquals(6, doPuts(100, tables[0]));
+
+    // Remove all the limits
+    admin.setQuota(QuotaSettingsFactory.unthrottleTable(TABLE_NAMES[0]));
+    triggerTableCacheRefresh(true, TABLE_NAMES[0]);
+    assertEquals(80, doGets(80, tables[0], tables[1]));
+  }
+
+  @Test(timeout=60000)
+  public void testNamespaceGlobalThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
+    final String NAMESPACE = "default";
+
+    // Add 6req/min limit
+    admin.setQuota(QuotaSettingsFactory
+      .throttleNamespace(NAMESPACE, ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerNamespaceCacheRefresh(false, TABLE_NAMES[0]);
+
+    // should execute at max 6 requests
+    assertEquals(6, doPuts(100, tables[0]));
+
+    // wait a minute and you should get another 6 requests executed
+    waitMinuteQuota();
+    assertEquals(6, doPuts(100, tables[1]));
+
+    admin.setQuota(QuotaSettingsFactory.unthrottleNamespace(NAMESPACE));
+    triggerNamespaceCacheRefresh(true, TABLE_NAMES[0]);
+    assertEquals(40, doPuts(40, tables[0]));
+  }
+
+  @Test(timeout=60000)
+  public void testUserAndTableThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
+    final String userName = User.getCurrent().getShortName();
+
+    // Add 6req/min limit for the user on tables[0]
+    admin.setQuota(QuotaSettingsFactory
+      .throttleUser(userName, TABLE_NAMES[0], ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerUserCacheRefresh(false, TABLE_NAMES[0]);
+    // Add 12req/min limit for the user
+    admin.setQuota(QuotaSettingsFactory
+      .throttleUser(userName, ThrottleType.REQUEST_NUMBER, 12, TimeUnit.MINUTES));
+    triggerUserCacheRefresh(false, TABLE_NAMES[1], TABLE_NAMES[2]);
+    // Add 8req/min limit for the tables[1]
+    admin.setQuota(QuotaSettingsFactory
+      .throttleTable(TABLE_NAMES[1], ThrottleType.REQUEST_NUMBER, 8, TimeUnit.MINUTES));
+    triggerTableCacheRefresh(false, TABLE_NAMES[1]);
+    // Add a lower table level throttle on tables[0]
+    admin.setQuota(QuotaSettingsFactory
+      .throttleTable(TABLE_NAMES[0], ThrottleType.REQUEST_NUMBER, 3, TimeUnit.MINUTES));
+    triggerTableCacheRefresh(false, TABLE_NAMES[0]);
+
+    // should execute at max 12 requests
+    assertEquals(12, doGets(100, tables[2]));
+
+    // should execute at max 8 requests
+    waitMinuteQuota();
+    assertEquals(8, doGets(100, tables[1]));
+
+    // should execute at max 3 requests
+    waitMinuteQuota();
+    assertEquals(3, doPuts(100, tables[0]));
+
+    // Remove all the throttling rules
+    admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName, TABLE_NAMES[0]));
+    admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName));
+    triggerUserCacheRefresh(true, TABLE_NAMES[0], TABLE_NAMES[1]);
+
+    admin.setQuota(QuotaSettingsFactory.unthrottleTable(TABLE_NAMES[1]));
+    triggerTableCacheRefresh(true, TABLE_NAMES[1]);
+    waitMinuteQuota();
+    assertEquals(40, doGets(40, tables[1]));
+
+    admin.setQuota(QuotaSettingsFactory.unthrottleTable(TABLE_NAMES[0]));
+    triggerTableCacheRefresh(true, TABLE_NAMES[0]);
+    waitMinuteQuota();
+    assertEquals(40, doGets(40, tables[0]));
+  }
+
+  @Test(timeout=60000)
+  public void testUserGlobalBypassThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getHBaseAdmin();
+    final String userName = User.getCurrent().getShortName();
+    final String NAMESPACE = "default";
+
+    // Add 6req/min limit for tables[0]
+    admin.setQuota(QuotaSettingsFactory
+      .throttleTable(TABLE_NAMES[0], ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerTableCacheRefresh(false, TABLE_NAMES[0]);
+    // Add 13req/min limit for the namespace
+    admin.setQuota(QuotaSettingsFactory
+      .throttleNamespace(NAMESPACE, ThrottleType.REQUEST_NUMBER, 13, TimeUnit.MINUTES));
+    triggerNamespaceCacheRefresh(false, TABLE_NAMES[1]);
+
+    // should execute at max 6 requests on table[0] and (13 - 6) on table[1]
+    assertEquals(6, doPuts(100, tables[0]));
+    assertEquals(7, doGets(100, tables[1]));
+    waitMinuteQuota();
+
+    // Set the global bypass for the user
+    admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, true));
+    admin.setQuota(QuotaSettingsFactory
+      .throttleUser(userName, TABLE_NAMES[2], ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
+    triggerUserCacheRefresh(false, TABLE_NAMES[2]);
+    assertEquals(30, doGets(30, tables[0]));
+    assertEquals(30, doGets(30, tables[1]));
+    waitMinuteQuota();
+
+    // Remove the global bypass
+    // should execute at max 6 requests on table[0] and (13 - 6) on table[1]
+    admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, false));
+    admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName, TABLE_NAMES[2]));
+    triggerUserCacheRefresh(true, TABLE_NAMES[2]);
+    assertEquals(6, doPuts(100, tables[0]));
+    assertEquals(7, doGets(100, tables[1]));
+
+    // unset throttle
+    admin.setQuota(QuotaSettingsFactory.unthrottleTable(TABLE_NAMES[0]));
+    admin.setQuota(QuotaSettingsFactory.unthrottleNamespace(NAMESPACE));
+    waitMinuteQuota();
+    triggerTableCacheRefresh(true, TABLE_NAMES[0]);
+    triggerNamespaceCacheRefresh(true, TABLE_NAMES[1]);
+    assertEquals(30, doGets(30, tables[0]));
+    assertEquals(30, doGets(30, tables[1]));
+  }
+
+  private int doPuts(int maxOps, final HTable... tables) throws Exception {
+    int count = 0;
+    try {
+      while (count < maxOps) {
+        Put put = new Put(Bytes.toBytes("row-" + count));
+        put.add(FAMILY, QUALIFIER, Bytes.toBytes("data-" + count));
+        for (final HTable table: tables) {
+          table.put(put);
+        }
+        count += tables.length;
+      }
+    } catch (RetriesExhaustedWithDetailsException e) {
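+      // The put path wraps the server-side ThrottlingException in a RetriesExhaustedWithDetailsException;
+      // rethrow any other cause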
+      for (Throwable t: e.getCauses()) {
+        if (!(t instanceof ThrottlingException)) {
+          throw e;
+        }
+      }
+      LOG.error("put failed after nRetries=" + count, e);
+    }
+    return count;
+  }
+
+  private long doGets(int maxOps, final HTable... tables) throws Exception {
+    int count = 0;
+    try {
+      while (count < maxOps) {
+        Get get = new Get(Bytes.toBytes("row-" + count));
+        for (final HTable table: tables) {
+          table.get(get);
+        }
+        count += tables.length;
+      }
+    } catch (ThrottlingException e) {
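+      // A throttled get surfaces ThrottlingException directly, so stop counting here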
+      LOG.error("get failed after nRetries=" + count, e);
+    }
+    return count;
+  }
+
+  private void triggerUserCacheRefresh(boolean bypass, TableName... tables) throws Exception {
+    triggerCacheRefresh(bypass, true, false, false, tables);
+  }
+
+  private void triggerTableCacheRefresh(boolean bypass, TableName... tables) throws Exception {
+    triggerCacheRefresh(bypass, false, true, false, tables);
+  }
+
+  private void triggerNamespaceCacheRefresh(boolean bypass, TableName... tables) throws Exception {
+    triggerCacheRefresh(bypass, false, false, true, tables);
+  }
+
+  private void triggerCacheRefresh(boolean bypass, boolean userLimiter, boolean tableLimiter,
+      boolean nsLimiter, final TableName... tables) throws Exception {
+    for (RegionServerThread rst: TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
+      RegionServerQuotaManager quotaManager = rst.getRegionServer().getRegionServerQuotaManager();
+      QuotaCache quotaCache = quotaManager.getQuotaCache();
+
+      quotaCache.triggerCacheRefresh();
+      Thread.sleep(250);
+
+      for (TableName table: tables) {
+        quotaCache.getTableLimiter(table);
+      }
+
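+      // Poll until every requested limiter reports the expected bypass state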
+      boolean isUpdated = false;
+      while (!isUpdated) {
+        isUpdated = true;
+        for (TableName table: tables) {
+          boolean isBypass = true;
+          if (userLimiter) {
+            isBypass &= quotaCache.getUserLimiter(User.getCurrent().getUGI(), table).isBypass();
+          }
+          if (tableLimiter) {
+            isBypass &= quotaCache.getTableLimiter(table).isBypass();
+          }
+          if (nsLimiter) {
+            isBypass &= quotaCache.getNamespaceLimiter(table.getNamespaceAsString()).isBypass();
+          }
+          if (isBypass != bypass) {
+            isUpdated = false;
+            Thread.sleep(250);
+            break;
+          }
+        }
+      }
+
+      LOG.debug("QuotaCache");
+      LOG.debug(quotaCache.getNamespaceQuotaCache());
+      LOG.debug(quotaCache.getTableQuotaCache());
+      LOG.debug(quotaCache.getUserQuotaCache());
+    }
+  }
+
+  private void waitMinuteQuota() {
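+    // Inject a clock ~70 seconds ahead so the per-minute quota window rolls over without sleeping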
+    EnvironmentEdgeManagerTestHelper.injectEdge(
+      new IncrementingEnvironmentEdge(
+        EnvironmentEdgeManager.currentTime() + 70000));
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
new file mode 100644
index 0000000..9d248ab
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Verify the behaviour of the Rate Limiter.
+ */
+@Category({SmallTests.class})
+public class TestRateLimiter {
+  @Test
+  public void testWaitIntervalTimeUnitSeconds() {
+    testWaitInterval(TimeUnit.SECONDS, 10, 100);
+  }
+
+  @Test
+  public void testWaitIntervalTimeUnitMinutes() {
+    testWaitInterval(TimeUnit.MINUTES, 10, 6000);
+  }
+
+  @Test
+  public void testWaitIntervalTimeUnitHours() {
+    testWaitInterval(TimeUnit.HOURS, 10, 360000);
+  }
+
+  @Test
+  public void testWaitIntervalTimeUnitDays() {
+    testWaitInterval(TimeUnit.DAYS, 10, 8640000);
+  }
+
+  private void testWaitInterval(final TimeUnit timeUnit, final long limit,
+      final long expectedWaitInterval) {
+    RateLimiter limiter = new RateLimiter();
+    limiter.set(limit, timeUnit);
+
+    long nowTs = 0;
+    long lastTs = 0;
+
+    // consume all but one of the available resources, one request at a time;
+    // the wait interval should be 0
+    for (int i = 0; i < (limit - 1); ++i) {
+      assertTrue(limiter.canExecute(nowTs, lastTs));
+      limiter.consume();
+      long waitInterval = limiter.waitInterval();
+      assertEquals(0, waitInterval);
+    }
+
+    for (int i = 0; i < (limit * 4); ++i) {
+      // There is one resource available, so we should be able to
+      // consume it without waiting.
+      assertTrue(limiter.canExecute(nowTs, lastTs));
+      assertEquals(0, limiter.waitInterval());
+      limiter.consume();
+      lastTs = nowTs;
+
+      // No more resources are available; we should wait for at least one interval.
+      long waitInterval = limiter.waitInterval();
+      assertEquals(expectedWaitInterval, waitInterval);
+
+      // set the nowTs to be the exact time when resources should be available again.
+      nowTs += waitInterval;
+
+      // step back in time to verify that a request arriving too early fails.
+      assertFalse(limiter.canExecute(nowTs - 500, lastTs));
+    }
+  }
+
+  @Test
+  public void testOverconsumption() {
+    RateLimiter limiter = new RateLimiter();
+    limiter.set(10, TimeUnit.SECONDS);
+
+    // 10 resources are available, but we need to consume 20 resources
+    // Verify that we have to wait at least 1.1sec to have 1 resource available
+    assertTrue(limiter.canExecute(0, 0));
+    limiter.consume(20);
+    assertEquals(1100, limiter.waitInterval());
+
+    // Verify that after 1sec we need to wait for another 0.1sec to get a resource available
+    assertFalse(limiter.canExecute(1000, 0));
+    assertEquals(100, limiter.waitInterval());
+
+    // Verify that after 1.1sec the resource is available
+    assertTrue(limiter.canExecute(1100, 0));
+    assertEquals(0, limiter.waitInterval());
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index aac9757..06e4960 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2502,4 +2502,67 @@ public class TestAccessController extends SecureTestUtil {
     verifyAllowed(replicateLogEntriesAction, SUPERUSER, USER_ADMIN);
     verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
+
+  @Test
+  public void testSetQuota() throws Exception {
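+    // Quota operations require global ADMIN rights; table-level quotas also allow the table owner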
+    AccessTestAction setUserQuotaAction = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preSetUserQuota(ObserverContext.createAndPrepare(CP_ENV, null),
+          null, null);
+        return null;
+      }
+    };
+
+    AccessTestAction setUserTableQuotaAction = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preSetUserQuota(ObserverContext.createAndPrepare(CP_ENV, null),
+          null, TEST_TABLE.getTableName(), null);
+        return null;
+      }
+    };
+
+    AccessTestAction setUserNamespaceQuotaAction = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preSetUserQuota(ObserverContext.createAndPrepare(CP_ENV, null),
+          null, (String)null, null);
+        return null;
+      }
+    };
+
+    AccessTestAction setTableQuotaAction = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preSetTableQuota(ObserverContext.createAndPrepare(CP_ENV, null),
+          TEST_TABLE.getTableName(), null);
+        return null;
+      }
+    };
+
+    AccessTestAction setNamespaceQuotaAction = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preSetNamespaceQuota(ObserverContext.createAndPrepare(CP_ENV, null),
+          null, null);
+        return null;
+      }
+    };
+
+    verifyAllowed(setUserQuotaAction, SUPERUSER, USER_ADMIN);
+    verifyDenied(setUserQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+
+    verifyAllowed(setUserTableQuotaAction, SUPERUSER, USER_ADMIN, USER_OWNER);
+    verifyDenied(setUserTableQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE);
+
+    verifyAllowed(setUserNamespaceQuotaAction, SUPERUSER, USER_ADMIN);
+    verifyDenied(setUserNamespaceQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+
+    verifyAllowed(setTableQuotaAction, SUPERUSER, USER_ADMIN, USER_OWNER);
+    verifyDenied(setTableQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE);
+
+    verifyAllowed(setNamespaceQuotaAction, SUPERUSER, USER_ADMIN);
+    verifyDenied(setNamespaceQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }
diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb
index d5fb5f6..281d18c 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -71,6 +71,12 @@ module HBaseConstants
   TABLE_CFS = 'TABLE_CFS'
   CONFIG = 'CONFIG'
   DATA = 'DATA'
+  USER = 'USER'
+  TABLE = 'TABLE'
+  NAMESPACE = 'NAMESPACE'
+  TYPE = 'TYPE'
+  NONE = 'NONE'
+  VALUE = 'VALUE'
 
   # Load constants from hbase java API
   def self.promote_constants(constants)
@@ -90,6 +96,10 @@ end
 require 'hbase/hbase'
 require 'hbase/admin'
 require 'hbase/table'
+require 'hbase/quotas'
 require 'hbase/replication_admin'
 require 'hbase/security'
 require 'hbase/visibility_labels'
+
+
+include HBaseQuotasConstants
\ No newline at end of file
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index 89700a4..a17ece2 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -21,6 +21,7 @@ include Java
 
 require 'hbase/admin'
 require 'hbase/table'
+require 'hbase/quotas'
 require 'hbase/security'
 require 'hbase/visibility_labels'
 
@@ -62,6 +63,10 @@ module Hbase
     def visibility_labels_admin(formatter)
       ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin, formatter)
     end
+
+    def quotas_admin(formatter)
+      ::Hbase::QuotasAdmin.new(@connection.getAdmin, formatter)
+    end
 
     def shutdown
       @connection.close
diff --git a/hbase-shell/src/main/ruby/hbase/quotas.rb b/hbase-shell/src/main/ruby/hbase/quotas.rb
new file mode 100644
index 0000000..fa076a5
--- /dev/null
+++ b/hbase-shell/src/main/ruby/hbase/quotas.rb
@@ -0,0 +1,216 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include Java
+java_import java.util.concurrent.TimeUnit
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.quotas.ThrottleType
+java_import org.apache.hadoop.hbase.quotas.QuotaFilter
+java_import org.apache.hadoop.hbase.quotas.QuotaRetriever
+java_import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory
+
+module HBaseQuotasConstants
+  GLOBAL_BYPASS = 'GLOBAL_BYPASS'
+  THROTTLE_TYPE = 'THROTTLE_TYPE'
+  THROTTLE = 'THROTTLE'
+  REQUEST = 'REQUEST'
+end
+
+module Hbase
+  class QuotasAdmin
+    def initialize(admin, formatter)
+      @admin = admin
+      @formatter = formatter
+    end
+
+    def close
+      @admin.close
+    end
+
+    def throttle(args)
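+      # args is the Hash built by the shell, e.g. {USER => 'u1', LIMIT => '10req/sec'}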
+      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+      type = args.fetch(THROTTLE_TYPE, REQUEST)
+      args.delete(THROTTLE_TYPE)
+      type, limit, time_unit = _parse_limit(args.delete(LIMIT), ThrottleType, type)
+      if args.has_key?(USER)
+        user = args.delete(USER)
+        if args.has_key?(TABLE)
+          table = TableName.valueOf(args.delete(TABLE))
+          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          settings = QuotaSettingsFactory.throttleUser(user, table, type, limit, time_unit)
+        elsif args.has_key?(NAMESPACE)
+          namespace = args.delete(NAMESPACE)
+          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          settings = QuotaSettingsFactory.throttleUser(user, namespace, type, limit, time_unit)
+        else
+          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          settings = QuotaSettingsFactory.throttleUser(user, type, limit, time_unit)
+        end
+      elsif args.has_key?(TABLE)
+        table = TableName.valueOf(args.delete(TABLE))
+        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        settings = QuotaSettingsFactory.throttleTable(table, type, limit, time_unit)
+      elsif args.has_key?(NAMESPACE)
+        namespace = args.delete(NAMESPACE)
+        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        settings = QuotaSettingsFactory.throttleNamespace(namespace, type, limit, time_unit)
+      else
+        raise "One of USER, TABLE or NAMESPACE must be specified"
+      end
+      @admin.setQuota(settings)
+    end
+
+    def unthrottle(args)
+      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+      if args.has_key?(USER)
+        user = args.delete(USER)
+        if args.has_key?(TABLE)
+          table = TableName.valueOf(args.delete(TABLE))
+          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          settings = QuotaSettingsFactory.unthrottleUser(user, table)
+        elsif args.has_key?(NAMESPACE)
+          namespace = args.delete(NAMESPACE)
+          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          settings = QuotaSettingsFactory.unthrottleUser(user, namespace)
+        else
+          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          settings = QuotaSettingsFactory.unthrottleUser(user)
+        end
+      elsif args.has_key?(TABLE)
+        table = TableName.valueOf(args.delete(TABLE))
+        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        settings = QuotaSettingsFactory.unthrottleTable(table)
+      elsif args.has_key?(NAMESPACE)
+        namespace = args.delete(NAMESPACE)
+        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        settings = QuotaSettingsFactory.unthrottleNamespace(namespace)
+      else
+        raise "One of USER, TABLE or NAMESPACE must be specified"
+      end
+      @admin.setQuota(settings)
+    end
+
+    def set_global_bypass(bypass, args)
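+      # GLOBAL_BYPASS => true exempts the given user from all other quota checks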
+      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+
+      if args.has_key?(USER)
+        user = args.delete(USER)
+        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        settings = QuotaSettingsFactory.bypassGlobals(user, bypass)
+      else
+        raise "Expected USER"
+      end
+      @admin.setQuota(settings)
+    end
+
+    def list_quotas(args = {})
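+      # Yields [owner, quota description] pairs, optionally filtered by USER, TABLE or NAMESPACE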
+      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+
+      limit = args.delete("LIMIT") || -1
+      count = 0
+
+      filter = QuotaFilter.new()
+      filter.setUserFilter(args.delete(USER)) if args.has_key?(USER)
+      filter.setTableFilter(args.delete(TABLE)) if args.has_key?(TABLE)
+      filter.setNamespaceFilter(args.delete(NAMESPACE)) if args.has_key?(NAMESPACE)
+      raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+
+      # Start the scanner
+      scanner = @admin.getQuotaRetriever(filter)
+      begin
+        iter = scanner.iterator
+
+        # Iterate results
+        while iter.hasNext
+          if limit > 0 && count >= limit
+            break
+          end
+
+          settings = iter.next
+          owner = {
+            USER => settings.getUserName(),
+            TABLE => settings.getTableName(),
+            NAMESPACE => settings.getNamespace(),
+          }.delete_if { |k, v| v.nil? }.map {|k, v| k.to_s + " => " + v.to_s} * ', '
+
+          yield owner, settings.to_s
+
+          count += 1
+        end
+      ensure
+        scanner.close()
+      end
+
+      return count
+    end
+
+    def _parse_size(str_limit)
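+      # Parse a size such as '100', '100k' or '1g' into bytes; a trailing '%' returns the bare number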
+      str_limit = str_limit.downcase
+      match = /(\d+)([bkmgtp%]*)/.match(str_limit)
+      if match
+        if match[2] == '%'
+          return match[1].to_i
+        else
+          return _size_from_str(match[1].to_i, match[2])
+        end
+      else
+        raise "Invalid size limit syntax"
+      end
+    end
+
+    def _parse_limit(str_limit, type_cls, type)
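+      # Parse a limit such as '10req/sec' or '100k/min' into [ThrottleType, amount, TimeUnit]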
+      str_limit = str_limit.downcase
+      match = /(\d+)(req|[bkmgtp])\/(sec|min|hour|day)/.match(str_limit)
+      if match
+        if match[2] == 'req'
+          limit = match[1].to_i
+          type = type_cls.valueOf(type + "_NUMBER")
+        else
+          limit = _size_from_str(match[1].to_i, match[2])
+          type = type_cls.valueOf(type + "_SIZE")
+        end
+
+        if limit <= 0
+          raise "Invalid throttle limit, must be greater then 0"
+        end
+
+        case match[3]
+          when 'sec'  then time_unit = TimeUnit::SECONDS
+          when 'min'  then time_unit = TimeUnit::MINUTES
+          when 'hour' then time_unit = TimeUnit::HOURS
+          when 'day'  then time_unit = TimeUnit::DAYS
+        end
+
+        return type, limit, time_unit
+      else
+        raise "Invalid throttle limit syntax"
+      end
+    end
+
+    def _size_from_str(value, suffix)
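+      # Binary multipliers: k=2**10, m=2**20, g=2**30, t=2**40, p=2**50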
+      case suffix
+        when 'k' then value <<= 10
+        when 'm' then value <<= 20
+        when 'g' then value <<= 30
+        when 't' then value <<= 40
+        when 'p' then value <<= 50
+      end
+      return value
+    end
+  end
+end
\ No newline at end of file
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 1879d7c..4361409 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -99,6 +99,10 @@ module Shell
     def hbase_visibility_labels_admin
       @hbase_visibility_labels_admin ||= hbase.visibility_labels_admin(formatter)
     end
+
+    def hbase_quotas_admin
+      @hbase_quotas_admin ||= hbase.quotas_admin(formatter)
+    end
 
     def export_commands(where)
       ::Shell.commands.keys.each do |cmd|
@@ -371,6 +375,15 @@ Shell.load_command_group(
 )
 
 Shell.load_command_group(
+  'quotas',
+  :full_name => 'CLUSTER QUOTAS TOOLS',
+  :commands => %w[
+    set_quota
+    list_quotas
+  ]
+)
+
+Shell.load_command_group(
   'security',
   :full_name => 'SECURITY TOOLS',
   :comment => "NOTE: Above commands are only applicable if running with the AccessController coprocessor",
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index 54fa204..625f36d 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -65,6 +65,10 @@ module Shell
       def visibility_labels_admin
         @shell.hbase_visibility_labels_admin
       end
+
+      def quotas_admin
+        @shell.hbase_quotas_admin
+      end
 
       #----------------------------------------------------------------------
 
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb b/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb
new file mode 100644
index 0000000..682bb71
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb
@@ -0,0 +1,52 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class ListQuotas < Command
+      def help
+        return <<-EOF
+List the quota settings added to the system.
+You can filter the result based on USER, TABLE, or NAMESPACE.
+
+For example:
+
+    hbase> list_quotas
+    hbase> list_quotas USER => 'bob.*'
+    hbase> list_quotas USER => 'bob.*', TABLE => 't1'
+    hbase> list_quotas USER => 'bob.*', NAMESPACE => 'ns.*'
+    hbase> list_quotas TABLE => 'myTable'
+    hbase> list_quotas NAMESPACE => 'ns.*'
+EOF
+      end
+
+      def command(args = {})
+        now = Time.now
+        formatter.header(["OWNER", "QUOTAS"])
+
+        # actually do the scanning
+        count = quotas_admin.list_quotas(args) do |row, cells|
+          formatter.row([ row, cells ])
+        end
+
+        formatter.footer(now, count)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_quota.rb b/hbase-shell/src/main/ruby/shell/commands/set_quota.rb
new file mode 100644
index 0000000..40e8a10
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/set_quota.rb
@@ -0,0 +1,70 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class SetQuota < Command
+      def help
+        return <<-EOF
+Set a quota for a user, table, or namespace.
+Syntax : set_quota TYPE => <type>, <args>
+
+TYPE => THROTTLE
+The request limit can be expressed using the form 100req/sec, 100req/min
+and the size limit can be expressed using the form 100k/sec, 100M/min
+with (B, K, M, G, T, P) as valid size units and (sec, min, hour, day) as valid time units.
+Currently the throttle limit is per machine - a limit of 100req/min
+means that each machine can execute 100req/min.
+
+For example:
+
+    hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10req/sec'
+    hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10M/sec'
+    hbase> set_quota TYPE => THROTTLE, USER => 'u1', TABLE => 't2', LIMIT => '5K/min'
+    hbase> set_quota TYPE => THROTTLE, USER => 'u1', NAMESPACE => 'ns2', LIMIT => NONE
+    hbase> set_quota TYPE => THROTTLE, NAMESPACE => 'ns1', LIMIT => '10req/sec'
+    hbase> set_quota TYPE => THROTTLE, TABLE => 't1', LIMIT => '10M/sec'
+    hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => NONE
+    hbase> set_quota USER => 'u1', GLOBAL_BYPASS => true
+EOF
+      end
+
+      def command(args = {})
+        if args.has_key?(TYPE)
+          qtype = args.delete(TYPE)
+          case qtype
+          when THROTTLE
+            if args[LIMIT].eql? NONE
+              args.delete(LIMIT)
+              quotas_admin.unthrottle(args)
+            else
+              quotas_admin.throttle(args)
+            end
+          else
+            raise "Invalid TYPE argument, got " + qtype
+          end
+        elsif args.has_key?(GLOBAL_BYPASS)
+          quotas_admin.set_global_bypass(args.delete(GLOBAL_BYPASS), args)
+        else
+          raise "Expected TYPE argument"
+        end
+      end
+    end
+  end
+end
-- 
1.9.5.msysgit.0