From b652506b977f5d36564e19fb2d4f27d6648f49f6 Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Mon, 31 Oct 2016 14:32:35 -0400 Subject: [PATCH 1/3] HBASE-16995 Implement protobuf msgs and client-side API for space quotas --- .../hadoop/hbase/quotas/QuotaSettingsFactory.java | 47 + .../org/apache/hadoop/hbase/quotas/QuotaType.java | 1 + .../hadoop/hbase/quotas/SpaceLimitSettings.java | 166 ++ .../hadoop/hbase/quotas/SpaceViolationPolicy.java | 44 + .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 51 + .../hbase/quotas/TestQuotaSettingsFactory.java | 148 ++ .../hbase/quotas/TestSpaceLimitSettings.java | 119 ++ .../shaded/protobuf/generated/MasterProtos.java | 498 ++++-- .../shaded/protobuf/generated/QuotaProtos.java | 1739 +++++++++++++++++++- .../src/main/protobuf/Master.proto | 2 + .../src/main/protobuf/Quota.proto | 21 + .../hbase/protobuf/generated/QuotaProtos.java | 1682 ++++++++++++++++++- hbase-protocol/src/main/protobuf/Quota.proto | 21 + 13 files changed, 4248 insertions(+), 291 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java index a7c49b3..b8e99b8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRe import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; @InterfaceAudience.Public @InterfaceStability.Evolving @@ -91,6 +92,9 @@ public class QuotaSettingsFactory { if (quotas.getBypassGlobals() == true) { settings.add(new QuotaGlobalsSettingsBypass(userName, tableName, namespace, true)); } + if (quotas.hasSpace()) { + settings.add(fromSpace(tableName, namespace, quotas.getSpace())); + } return settings; } @@ -124,6 +128,18 @@ public class QuotaSettingsFactory { return settings; } + static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota protoQuota) { + if ((null == table && null == namespace) || (null != table && null != namespace)) { + throw new IllegalArgumentException("Can only construct SpaceLimitSettings for a table or namespace."); + } + if (null != table) { + return SpaceLimitSettings.fromSpaceQuota(table, protoQuota); + } else { + // namespace must be non-null + return SpaceLimitSettings.fromSpaceQuota(namespace, protoQuota); + } + } + /* ========================================================================== * RPC Throttle */ @@ -280,4 +296,35 @@ public class QuotaSettingsFactory { public static QuotaSettings bypassGlobals(final String userName, final boolean bypassGlobals) { return new QuotaGlobalsSettingsBypass(userName, null, null, bypassGlobals); } + + /* ========================================================================== + * 
FileSystem Space Settings
+   */
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given table to the given size in bytes.
+   * When the space usage is exceeded by the table, the provided {@link SpaceViolationPolicy} is enacted on the table.
+   *
+   * @param tableName The name of the table on which the quota should be applied.
+   * @param sizeLimit The limit of a table's size in bytes.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings limitTableSpace(final TableName tableName, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
+    return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy);
+  }
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given namespace to the given size in bytes.
+   * When the space usage is exceeded by all tables in the namespace, the provided {@link SpaceViolationPolicy} is enacted on
+   * all tables in the namespace.
+   *
+   * @param namespace The namespace on which the quota should be applied.
+   * @param sizeLimit The limit of the namespace's size in bytes.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings limitNamespaceSpace(final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
+    return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java
index 40a8b66..2c44201 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java
@@ -28,4 +28,5 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 public enum QuotaType {
   THROTTLE,
   GLOBAL_BYPASS,
+  SPACE,
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
new file mode 100644
index 0000000..dded9b5
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.Builder;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+
+/**
+ * A {@link QuotaSettings} implementation for configuring filesystem-use quotas.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class SpaceLimitSettings extends QuotaSettings {
+
+  private final SpaceLimitRequest proto;
+
+  SpaceLimitSettings(TableName tableName, long sizeLimit, SpaceViolationPolicy violationPolicy) {
+    super(null, Objects.requireNonNull(tableName), null);
+    if (0L > sizeLimit) {
+      throw new IllegalArgumentException("Size limit must be a non-negative value.");
+    }
+    proto = buildProtoQuota(sizeLimit, Objects.requireNonNull(violationPolicy));
+  }
+
+  SpaceLimitSettings(String namespace, long sizeLimit, SpaceViolationPolicy violationPolicy) {
+    super(null, null, Objects.requireNonNull(namespace));
+    if (0L > sizeLimit) {
+      throw new IllegalArgumentException("Size limit must be a non-negative value.");
+    }
+    proto = buildProtoQuota(sizeLimit, Objects.requireNonNull(violationPolicy));
+  }
+
+  /**
+   * Builds a {@link SpaceLimitRequest} protobuf object wrapping a {@link SpaceQuota} built from the given arguments.
+   *
+   * @param sizeLimit The size limit of the quota.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return The protobuf SpaceLimitRequest representation.
+   */
+  private SpaceLimitRequest buildProtoQuota(long sizeLimit, SpaceViolationPolicy violationPolicy) {
+    return SpaceLimitRequest.newBuilder().setQuota(
+        SpaceQuota.newBuilder()
+            .setSoftLimit(sizeLimit)
+            .setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(violationPolicy))
+            .build())
+        .build();
+  }
+
+  /**
+   * Returns a copy of the internal {@link SpaceLimitRequest} state of this settings object.
+   */
+  SpaceLimitRequest getProto() {
+    return proto.toBuilder().build();
+  }
+
+  @Override
+  public QuotaType getQuotaType() {
+    return QuotaType.SPACE;
+  }
+
+  @Override
+  protected void setupSetQuotaRequest(Builder builder) {
+    // TableName/Namespace are serialized in QuotaSettings
+    builder.setSpaceLimit(proto);
+  }
+
+  /**
+   * Constructs a {@link SpaceLimitSettings} from the provided protobuf message and table name.
+   *
+   * @param tableName The target table name for the limit.
+   * @param proto The protobuf representation.
+   * @return A QuotaSettings.
+   */
+  static SpaceLimitSettings fromSpaceQuota(final TableName tableName,
+      final QuotaProtos.SpaceQuota proto) {
+    validateProtoArguments(proto);
+    return new SpaceLimitSettings(tableName, proto.getSoftLimit(),
+        ProtobufUtil.toViolationPolicy(proto.getViolationPolicy()));
+  }
+
+  /**
+   * Constructs a {@link SpaceLimitSettings} from the provided protobuf message and namespace.
+   *
+   * @param namespace The target namespace for the limit.
+   * @param proto The protobuf representation.
+   * @return A QuotaSettings.
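+   *
+   * <p>A minimal usage sketch; the namespace name and one-gigabyte limit below are illustrative only:
+   * <pre>
+   *   SpaceQuota quota = SpaceQuota.newBuilder()
+   *       .setSoftLimit(1024L * 1024L * 1024L)
+   *       .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_WRITES)
+   *       .build();
+   *   SpaceLimitSettings settings = SpaceLimitSettings.fromSpaceQuota("my_ns", quota);
+   * </pre>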
+ */ + static SpaceLimitSettings fromSpaceQuota(final String namespace, + final QuotaProtos.SpaceQuota proto) { + validateProtoArguments(proto); + return new SpaceLimitSettings(namespace, proto.getSoftLimit(), + ProtobufUtil.toViolationPolicy(proto.getViolationPolicy())); + } + + /** + * Validates that the provided protobuf SpaceQuota has the necessary information to construct + * a {@link SpaceLimitSettings}. + * + * @param proto The protobuf message to validate. + */ + static void validateProtoArguments(final QuotaProtos.SpaceQuota proto) { + if (!Objects.requireNonNull(proto).hasSoftLimit()) { + throw new IllegalArgumentException("Cannot handle SpaceQuota without a soft limit"); + } + if (!proto.hasViolationPolicy()) { + throw new IllegalArgumentException("Cannot handle SpaceQuota without a violation policy"); + } + } + + @Override + public int hashCode() { + return Objects.hash(getTableName(), getNamespace(), proto); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (!(o instanceof SpaceLimitSettings)) { + return false; + } + // o is non-null and an instance of SpaceLimitSettings + SpaceLimitSettings other = (SpaceLimitSettings) o; + return Objects.equals(getTableName(), other.getTableName()) && + Objects.equals(getNamespace(), other.getNamespace()) && + Objects.equals(proto, other.proto); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("TYPE => SPACE"); + if (null != getTableName()) { + sb.append(", TABLE => ").append(getTableName()); + } + if (null != getNamespace()) { + sb.append(", NAMESPACE => ").append(getNamespace()); + } + sb.append(", LIMIT => ").append(proto.getQuota().getSoftLimit()); + sb.append(", VIOLATION_POLICY => ").append(proto.getQuota().getViolationPolicy()); + return sb.toString(); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java new file mode 100644 index 0000000..c63acb0 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Enumeration that represents the action HBase will take when a space quota is violated. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum SpaceViolationPolicy { + /** + * Disables the table(s). + */ + DISABLE, + /** + * Disallows any mutations or compactions on the table(s). 
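+   * This is the most restrictive of the policies, short of disabling the table outright.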
+ */ + NO_WRITES_COMPACTIONS, + /** + * Disallows any mutations (but allows compactions) on the table(s). + */ + NO_WRITES, + /** + * Disallows any updates (but allows deletes and compactions) on the table(s). + */ + NO_INSERTS, +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 2758c26..2bfc8ac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.quotas.QuotaScope; import org.apache.hadoop.hbase.quotas.QuotaType; +import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy; import org.apache.hadoop.hbase.quotas.ThrottleType; import org.apache.hadoop.hbase.replication.ReplicationLoadSink; import org.apache.hadoop.hbase.replication.ReplicationLoadSource; @@ -2464,6 +2465,7 @@ public final class ProtobufUtil { public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { switch (proto) { case THROTTLE: return QuotaType.THROTTLE; + case SPACE: return QuotaType.SPACE; } throw new RuntimeException("Invalid QuotaType " + proto); } @@ -2477,11 +2479,45 @@ public final class ProtobufUtil { public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { switch (type) { case THROTTLE: return QuotaProtos.QuotaType.THROTTLE; + case SPACE: return QuotaProtos.QuotaType.SPACE; } throw new RuntimeException("Invalid QuotaType " + type); } /** + * Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy. + * + * @param proto The protocol buffer space violation policy. + * @return The corresponding client SpaceViolationPolicy. + */ + public static SpaceViolationPolicy toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) { + switch (proto) { + case DISABLE: return SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: return SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: return SpaceViolationPolicy.NO_INSERTS; + } + throw new RuntimeException("Invalid SpaceViolationPolicy " + proto); + } + + /** + * Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy. + * + * @param policy The client SpaceViolationPolicy object. + * @return The corresponding protocol buffer SpaceViolationPolicy. + */ + public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy( + final SpaceViolationPolicy policy) { + switch (policy) { + case DISABLE: return QuotaProtos.SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: return QuotaProtos.SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; + } + throw new RuntimeException("Invalid SpaceViolationPolicy " + policy); + } + + /** * Build a protocol buffer TimedQuota * * @param limit the allowed number of request/data per timeUnit @@ -2499,6 +2535,21 @@ public final class ProtobufUtil { } /** + * Builds a protocol buffer SpaceQuota. + * + * @param limit The maximum space usage for the quota in bytes. + * @param violationPolicy The policy to apply when the quota is violated. + * @return The protocol buffer SpaceQuota. 
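+   *
+   * <p>For example, a sketch with an arbitrary ten-gigabyte limit:
+   * <pre>
+   *   QuotaProtos.SpaceQuota quota = ProtobufUtil.toProtoSpaceQuota(
+   *       10L * 1024 * 1024 * 1024, SpaceViolationPolicy.NO_INSERTS);
+   * </pre>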
+ */ + public static QuotaProtos.SpaceQuota toProtoSpaceQuota(final long limit, + final SpaceViolationPolicy violationPolicy) { + return QuotaProtos.SpaceQuota.newBuilder() + .setSoftLimit(limit) + .setViolationPolicy(toProtoViolationPolicy(violationPolicy)) + .build(); + } + + /** * Generates a marker for the WAL so that we propagate the notion of a bulk region load * throughout the WAL. * diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java new file mode 100644 index 0000000..17015d6 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test class for {@link QuotaSettingsFactory}. 
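+ * Verifies that the throttle and space components of a {@code Quotas} message are each surfaced
+ * as their own {@link QuotaSettings}, and that a space quota must name exactly one of a table
+ * or a namespace.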
+ */ +@Category(SmallTests.class) +public class TestQuotaSettingsFactory { + + @Test + public void testAllQuotasAddedToList() { + final SpaceQuota spaceQuota = SpaceQuota.newBuilder() + .setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table + .build(); + final long readLimit = 1000; + final long writeLimit = 500; + final Throttle throttle = Throttle.newBuilder() + // 1000 read reqs/min + .setReadNum(TimedQuota.newBuilder().setSoftLimit(readLimit).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) + // 500 write reqs/min + .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) + .build(); + final Quotas quotas = Quotas.newBuilder() + .setSpace(spaceQuota) // Set the FS quotas + .setThrottle(throttle) // Set some RPC limits + .build(); + final TableName tn = TableName.valueOf("my_table"); + List settings = QuotaSettingsFactory.fromTableQuotas(tn, quotas); + assertEquals(3, settings.size()); + boolean seenRead = false; + boolean seenWrite = false; + boolean seenSpace = false; + for (QuotaSettings setting : settings) { + if (setting instanceof ThrottleSettings) { + ThrottleSettings throttleSettings = (ThrottleSettings) setting; + switch (throttleSettings.getThrottleType()) { + case READ_NUMBER: + assertFalse("Should not have multiple read quotas", seenRead); + assertEquals(readLimit, throttleSettings.getSoftLimit()); + assertEquals(TimeUnit.MINUTES, throttleSettings.getTimeUnit()); + assertEquals(tn, throttleSettings.getTableName()); + assertNull("Username should be null", throttleSettings.getUserName()); + assertNull("Namespace should be null", throttleSettings.getNamespace()); + seenRead = true; + break; + case WRITE_NUMBER: + assertFalse("Should not have multiple write quotas", seenWrite); + assertEquals(writeLimit, throttleSettings.getSoftLimit()); + assertEquals(TimeUnit.MINUTES, throttleSettings.getTimeUnit()); + assertEquals(tn, throttleSettings.getTableName()); + assertNull("Username should be null", throttleSettings.getUserName()); + assertNull("Namespace should be null", throttleSettings.getNamespace()); + seenWrite = true; + break; + default: + fail("Unexpected throttle type: " + throttleSettings.getThrottleType()); + } + } else if (setting instanceof SpaceLimitSettings) { + assertFalse("Should not have multiple space quotas", seenSpace); + SpaceLimitSettings spaceLimit = (SpaceLimitSettings) setting; + assertEquals(tn, spaceLimit.getTableName()); + assertNull("Username should be null", spaceLimit.getUserName()); + assertNull("Namespace should be null", spaceLimit.getNamespace()); + assertTrue("SpaceLimitSettings should have a SpaceQuota", spaceLimit.getProto().hasQuota()); + assertEquals(spaceQuota, spaceLimit.getProto().getQuota()); + seenSpace = true; + } else { + fail("Unexpected QuotaSettings implementation: " + setting.getClass()); + } + } + assertTrue("Should have seen a read quota", seenRead); + assertTrue("Should have seen a write quota", seenWrite); + assertTrue("Should have seen a space quota", seenSpace); + } + + @Test(expected = IllegalArgumentException.class) + public void testNeitherTableNorNamespace() { + final SpaceQuota spaceQuota = SpaceQuota.newBuilder() + .setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) + .build(); + QuotaSettingsFactory.fromSpace(null, null, spaceQuota); + } + + @Test(expected = IllegalArgumentException.class) + public void testBothTableAndNamespace() { + final SpaceQuota 
spaceQuota = SpaceQuota.newBuilder() + .setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) + .build(); + QuotaSettingsFactory.fromSpace(TableName.valueOf("foo"), "bar", spaceQuota); + } + + @Test + public void testSpaceLimitSettings() { + final TableName tableName = TableName.valueOf("foo"); + final long sizeLimit = 1024L * 1024L * 1024L * 75; // 75GB + final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_INSERTS; + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy); + assertNotNull("QuotaSettings should not be null", settings); + assertTrue("Should be an instance of SpaceLimitSettings", settings instanceof SpaceLimitSettings); + SpaceLimitSettings spaceLimitSettings = (SpaceLimitSettings) settings; + SpaceLimitRequest protoRequest = spaceLimitSettings.getProto(); + assertTrue("Request should have a SpaceQuota", protoRequest.hasQuota()); + SpaceQuota quota = protoRequest.getQuota(); + assertEquals(sizeLimit, quota.getSoftLimit()); + assertEquals(violationPolicy, ProtobufUtil.toViolationPolicy(quota.getViolationPolicy())); + } +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java new file mode 100644 index 0000000..77a00da --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test class for {@link SpaceLimitSettings}. 
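+ * Covers size-limit and null-argument validation in the constructors, and round-tripping of
+ * table and namespace space quotas through a {@code SetQuotaRequest}.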
+ */ +@Category({SmallTests.class}) +public class TestSpaceLimitSettings { + + @Test(expected = IllegalArgumentException.class) + public void testInvalidTableQuotaSizeLimit() { + new SpaceLimitSettings(TableName.valueOf("foo"), -1, SpaceViolationPolicy.NO_INSERTS); + } + + @Test(expected = NullPointerException.class) + public void testNullTableName() { + TableName tn = null; + new SpaceLimitSettings(tn, 1, SpaceViolationPolicy.NO_INSERTS); + } + + @Test(expected = NullPointerException.class) + public void testNullTableViolationPolicy() { + new SpaceLimitSettings(TableName.valueOf("foo"), 1, null); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidNamespaceQuotaSizeLimit() { + new SpaceLimitSettings("foo_ns", -1, SpaceViolationPolicy.NO_INSERTS); + } + + @Test(expected = NullPointerException.class) + public void testNullNamespace() { + String ns = null; + new SpaceLimitSettings(ns, 1, SpaceViolationPolicy.NO_INSERTS); + } + + @Test(expected = NullPointerException.class) + public void testNullNamespaceViolationPolicy() { + new SpaceLimitSettings("foo_ns", 1, null); + } + + @Test + public void testTableQuota() { + final TableName tableName = TableName.valueOf("foo"); + final long sizeLimit = 1024 * 1024; + final SpaceViolationPolicy policy = SpaceViolationPolicy.NO_WRITES; + SpaceLimitSettings settings = new SpaceLimitSettings(tableName, sizeLimit, policy); + SetQuotaRequest proto = QuotaSettings.buildSetQuotaRequestProto(settings); + + assertFalse("User should be missing", proto.hasUserName()); + assertFalse("Namespace should be missing", proto.hasNamespace()); + assertEquals(ProtobufUtil.toProtoTableName(tableName), proto.getTableName()); + SpaceLimitRequest spaceLimitReq = proto.getSpaceLimit(); + assertNotNull("SpaceLimitRequest was null", spaceLimitReq); + SpaceQuota spaceQuota = spaceLimitReq.getQuota(); + assertNotNull("SpaceQuota was null", spaceQuota); + assertEquals(sizeLimit, spaceQuota.getSoftLimit()); + assertEquals(ProtobufUtil.toProtoViolationPolicy(policy), spaceQuota.getViolationPolicy()); + + assertEquals(QuotaType.SPACE, settings.getQuotaType()); + + SpaceLimitSettings copy = new SpaceLimitSettings(tableName, sizeLimit, policy); + assertEquals(settings, copy); + assertEquals(settings.hashCode(), copy.hashCode()); + } + + @Test + public void testNamespaceQuota() { + final String namespace = "foo_ns"; + final long sizeLimit = 1024 * 1024; + final SpaceViolationPolicy policy = SpaceViolationPolicy.NO_WRITES; + SpaceLimitSettings settings = new SpaceLimitSettings(namespace, sizeLimit, policy); + SetQuotaRequest proto = QuotaSettings.buildSetQuotaRequestProto(settings); + + assertFalse("User should be missing", proto.hasUserName()); + assertFalse("TableName should be missing", proto.hasTableName()); + assertEquals(namespace, proto.getNamespace()); + SpaceLimitRequest spaceLimitReq = proto.getSpaceLimit(); + assertNotNull("SpaceLimitRequest was null", spaceLimitReq); + SpaceQuota spaceQuota = spaceLimitReq.getQuota(); + assertNotNull("SpaceQuota was null", spaceQuota); + assertEquals(sizeLimit, spaceQuota.getSoftLimit()); + assertEquals(ProtobufUtil.toProtoViolationPolicy(policy), spaceQuota.getViolationPolicy()); + + assertEquals(QuotaType.SPACE, settings.getQuotaType()); + + SpaceLimitSettings copy = new SpaceLimitSettings(namespace, sizeLimit, policy); + assertEquals(settings, copy); + assertEquals(settings.hashCode(), copy.hashCode()); + } +} diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java index 03ef208..e13d61d 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java @@ -59646,6 +59646,19 @@ public final class MasterProtos { * optional .hbase.pb.ThrottleRequest throttle = 7; */ org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder(); + + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + boolean hasSpaceLimit(); + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getSpaceLimit(); + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder getSpaceLimitOrBuilder(); } /** * Protobuf type {@code hbase.pb.SetQuotaRequest} @@ -59748,6 +59761,19 @@ public final class MasterProtos { bitField0_ |= 0x00000040; break; } + case 66: { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = spaceLimit_.toBuilder(); + } + spaceLimit_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(spaceLimit_); + spaceLimit_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } } } } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { @@ -59971,6 +59997,27 @@ public final class MasterProtos { return throttle_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance() : throttle_; } + public static final int SPACE_LIMIT_FIELD_NUMBER = 8; + private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest spaceLimit_; + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public boolean hasSpaceLimit() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getSpaceLimit() { + return spaceLimit_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_; + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder getSpaceLimitOrBuilder() { + return spaceLimit_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_; + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; @@ -60016,6 +60063,9 @@ public final class MasterProtos { if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeMessage(7, getThrottle()); } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(8, getSpaceLimit()); + } unknownFields.writeTo(output); } @@ -60049,6 +60099,10 @@ public final class MasterProtos { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeMessageSize(7, getThrottle()); } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(8, getSpaceLimit()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -60101,6 +60155,11 @@ public final class MasterProtos { result = result && getThrottle() .equals(other.getThrottle()); } + result = result && (hasSpaceLimit() == other.hasSpaceLimit()); + if (hasSpaceLimit()) { + result = result && getSpaceLimit() + .equals(other.getSpaceLimit()); + } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -60142,6 +60201,10 @@ public final class MasterProtos { hash = (37 * hash) + THROTTLE_FIELD_NUMBER; hash = (53 * hash) + getThrottle().hashCode(); } + if (hasSpaceLimit()) { + hash = (37 * hash) + SPACE_LIMIT_FIELD_NUMBER; + hash = (53 * hash) + getSpaceLimit().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -60258,6 +60321,7 @@ public final class MasterProtos { .alwaysUseFieldBuilders) { getTableNameFieldBuilder(); getThrottleFieldBuilder(); + getSpaceLimitFieldBuilder(); } } public Builder clear() { @@ -60284,6 +60348,12 @@ public final class MasterProtos { throttleBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); + if (spaceLimitBuilder_ == null) { + spaceLimit_ = null; + } else { + spaceLimitBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000080); return this; } @@ -60344,6 +60414,14 @@ public final class MasterProtos { } else { result.throttle_ = throttleBuilder_.build(); } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + if (spaceLimitBuilder_ == null) { + result.spaceLimit_ = spaceLimit_; + } else { + result.spaceLimit_ = spaceLimitBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -60413,6 +60491,9 @@ public final class MasterProtos { if (other.hasThrottle()) { mergeThrottle(other.getThrottle()); } + if (other.hasSpaceLimit()) { + mergeSpaceLimit(other.getSpaceLimit()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -60978,6 +61059,124 @@ public final class MasterProtos { } return throttleBuilder_; } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest spaceLimit_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder> spaceLimitBuilder_; + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public boolean hasSpaceLimit() { + return ((bitField0_ & 
0x00000080) == 0x00000080); + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getSpaceLimit() { + if (spaceLimitBuilder_ == null) { + return spaceLimit_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_; + } else { + return spaceLimitBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public Builder setSpaceLimit(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest value) { + if (spaceLimitBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + spaceLimit_ = value; + onChanged(); + } else { + spaceLimitBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public Builder setSpaceLimit( + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder builderForValue) { + if (spaceLimitBuilder_ == null) { + spaceLimit_ = builderForValue.build(); + onChanged(); + } else { + spaceLimitBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public Builder mergeSpaceLimit(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest value) { + if (spaceLimitBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080) && + spaceLimit_ != null && + spaceLimit_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance()) { + spaceLimit_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.newBuilder(spaceLimit_).mergeFrom(value).buildPartial(); + } else { + spaceLimit_ = value; + } + onChanged(); + } else { + spaceLimitBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public Builder clearSpaceLimit() { + if (spaceLimitBuilder_ == null) { + spaceLimit_ = null; + onChanged(); + } else { + spaceLimitBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000080); + return this; + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder getSpaceLimitBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return getSpaceLimitFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder getSpaceLimitOrBuilder() { + if (spaceLimitBuilder_ != null) { + return spaceLimitBuilder_.getMessageOrBuilder(); + } else { + return spaceLimit_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_; + } + } + /** + * optional .hbase.pb.SpaceLimitRequest space_limit = 8; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder> + getSpaceLimitFieldBuilder() { + if (spaceLimitBuilder_ == null) { + spaceLimitBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder>( + getSpaceLimit(), + getParentForChildren(), + isClean()); + spaceLimit_ = null; + } + return spaceLimitBuilder_; + } public final Builder setUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); @@ -69577,158 +69776,159 @@ public final class MasterProtos { "ureResponse\022\034\n\024is_procedure_aborted\030\001 \002(" + "\010\"\027\n\025ListProceduresRequest\"@\n\026ListProced" + "uresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase." + - "pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser" + + "pb.Procedure\"\377\001\n\017SetQuotaRequest\022\021\n\tuser" + "_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnames", "pace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.p" + "b.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypas" + "s_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbas" + - "e.pb.ThrottleRequest\"\022\n\020SetQuotaResponse" + - "\"J\n\037MajorCompactionTimestampRequest\022\'\n\nt" + - "able_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(" + - "MajorCompactionTimestampForRegionRequest" + - "\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecif" + - "ier\"@\n MajorCompactionTimestampResponse\022" + - "\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Securit", - "yCapabilitiesRequest\"\354\001\n\034SecurityCapabil" + - "itiesResponse\022G\n\014capabilities\030\001 \003(\01621.hb" + - "ase.pb.SecurityCapabilitiesResponse.Capa" + - "bility\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTI" + - "CATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rA" + - "UTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023" + - "\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchType\022" + - "\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022" + - "e\n\024GetSchemaAlterStatus\022%.hbase.pb.GetSc" + - "hemaAlterStatusRequest\032&.hbase.pb.GetSch", - "emaAlterStatusResponse\022b\n\023GetTableDescri" + - "ptors\022$.hbase.pb.GetTableDescriptorsRequ" + - "est\032%.hbase.pb.GetTableDescriptorsRespon" + - "se\022P\n\rGetTableNames\022\036.hbase.pb.GetTableN" + - "amesRequest\032\037.hbase.pb.GetTableNamesResp" + - "onse\022Y\n\020GetClusterStatus\022!.hbase.pb.GetC" + - "lusterStatusRequest\032\".hbase.pb.GetCluste" + - "rStatusResponse\022V\n\017IsMasterRunning\022 .hba" + - "se.pb.IsMasterRunningRequest\032!.hbase.pb." 
+ - "IsMasterRunningResponse\022D\n\tAddColumn\022\032.h", - "base.pb.AddColumnRequest\032\033.hbase.pb.AddC" + - "olumnResponse\022M\n\014DeleteColumn\022\035.hbase.pb" + - ".DeleteColumnRequest\032\036.hbase.pb.DeleteCo" + - "lumnResponse\022M\n\014ModifyColumn\022\035.hbase.pb." + - "ModifyColumnRequest\032\036.hbase.pb.ModifyCol" + - "umnResponse\022G\n\nMoveRegion\022\033.hbase.pb.Mov" + - "eRegionRequest\032\034.hbase.pb.MoveRegionResp" + - "onse\022k\n\026DispatchMergingRegions\022\'.hbase.p" + - "b.DispatchMergingRegionsRequest\032(.hbase." + - "pb.DispatchMergingRegionsResponse\022M\n\014Ass", - "ignRegion\022\035.hbase.pb.AssignRegionRequest" + - "\032\036.hbase.pb.AssignRegionResponse\022S\n\016Unas" + - "signRegion\022\037.hbase.pb.UnassignRegionRequ" + - "est\032 .hbase.pb.UnassignRegionResponse\022P\n" + - "\rOfflineRegion\022\036.hbase.pb.OfflineRegionR" + - "equest\032\037.hbase.pb.OfflineRegionResponse\022" + - "J\n\013DeleteTable\022\034.hbase.pb.DeleteTableReq" + - "uest\032\035.hbase.pb.DeleteTableResponse\022P\n\rt" + - "runcateTable\022\036.hbase.pb.TruncateTableReq" + - "uest\032\037.hbase.pb.TruncateTableResponse\022J\n", - "\013EnableTable\022\034.hbase.pb.EnableTableReque" + - "st\032\035.hbase.pb.EnableTableResponse\022M\n\014Dis" + - "ableTable\022\035.hbase.pb.DisableTableRequest" + - "\032\036.hbase.pb.DisableTableResponse\022J\n\013Modi" + - "fyTable\022\034.hbase.pb.ModifyTableRequest\032\035." + - "hbase.pb.ModifyTableResponse\022J\n\013CreateTa" + - "ble\022\034.hbase.pb.CreateTableRequest\032\035.hbas" + - "e.pb.CreateTableResponse\022A\n\010Shutdown\022\031.h" + - "base.pb.ShutdownRequest\032\032.hbase.pb.Shutd" + - "ownResponse\022G\n\nStopMaster\022\033.hbase.pb.Sto", - "pMasterRequest\032\034.hbase.pb.StopMasterResp" + - "onse\022h\n\031IsMasterInMaintenanceMode\022$.hbas" + - "e.pb.IsInMaintenanceModeRequest\032%.hbase." + - "pb.IsInMaintenanceModeResponse\022>\n\007Balanc" + - "e\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.B" + - "alanceResponse\022_\n\022SetBalancerRunning\022#.h" + - "base.pb.SetBalancerRunningRequest\032$.hbas" + - "e.pb.SetBalancerRunningResponse\022\\\n\021IsBal" + - "ancerEnabled\022\".hbase.pb.IsBalancerEnable" + - "dRequest\032#.hbase.pb.IsBalancerEnabledRes", - "ponse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase." + - "pb.SetSplitOrMergeEnabledRequest\032(.hbase" + - ".pb.SetSplitOrMergeEnabledResponse\022h\n\025Is" + - "SplitOrMergeEnabled\022&.hbase.pb.IsSplitOr" + - "MergeEnabledRequest\032\'.hbase.pb.IsSplitOr" + - "MergeEnabledResponse\022D\n\tNormalize\022\032.hbas" + - "e.pb.NormalizeRequest\032\033.hbase.pb.Normali" + - "zeResponse\022e\n\024SetNormalizerRunning\022%.hba" + - "se.pb.SetNormalizerRunningRequest\032&.hbas" + - "e.pb.SetNormalizerRunningResponse\022b\n\023IsN", - "ormalizerEnabled\022$.hbase.pb.IsNormalizer" + - "EnabledRequest\032%.hbase.pb.IsNormalizerEn" + - "abledResponse\022S\n\016RunCatalogScan\022\037.hbase." + - "pb.RunCatalogScanRequest\032 .hbase.pb.RunC" + - "atalogScanResponse\022e\n\024EnableCatalogJanit" + - "or\022%.hbase.pb.EnableCatalogJanitorReques" + - "t\032&.hbase.pb.EnableCatalogJanitorRespons" + - "e\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb." 
+ - "IsCatalogJanitorEnabledRequest\032).hbase.p" + - "b.IsCatalogJanitorEnabledResponse\022^\n\021Exe", - "cMasterService\022#.hbase.pb.CoprocessorSer" + - "viceRequest\032$.hbase.pb.CoprocessorServic" + - "eResponse\022A\n\010Snapshot\022\031.hbase.pb.Snapsho" + - "tRequest\032\032.hbase.pb.SnapshotResponse\022h\n\025" + - "GetCompletedSnapshots\022&.hbase.pb.GetComp" + - "letedSnapshotsRequest\032\'.hbase.pb.GetComp" + - "letedSnapshotsResponse\022S\n\016DeleteSnapshot" + - "\022\037.hbase.pb.DeleteSnapshotRequest\032 .hbas" + - "e.pb.DeleteSnapshotResponse\022S\n\016IsSnapsho" + - "tDone\022\037.hbase.pb.IsSnapshotDoneRequest\032 ", - ".hbase.pb.IsSnapshotDoneResponse\022V\n\017Rest" + - "oreSnapshot\022 .hbase.pb.RestoreSnapshotRe" + - "quest\032!.hbase.pb.RestoreSnapshotResponse" + - "\022P\n\rExecProcedure\022\036.hbase.pb.ExecProcedu" + - "reRequest\032\037.hbase.pb.ExecProcedureRespon" + - "se\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Ex" + - "ecProcedureRequest\032\037.hbase.pb.ExecProced" + - "ureResponse\022V\n\017IsProcedureDone\022 .hbase.p" + - "b.IsProcedureDoneRequest\032!.hbase.pb.IsPr" + - "ocedureDoneResponse\022V\n\017ModifyNamespace\022 ", - ".hbase.pb.ModifyNamespaceRequest\032!.hbase" + - ".pb.ModifyNamespaceResponse\022V\n\017CreateNam" + - "espace\022 .hbase.pb.CreateNamespaceRequest" + - "\032!.hbase.pb.CreateNamespaceResponse\022V\n\017D" + - "eleteNamespace\022 .hbase.pb.DeleteNamespac" + - "eRequest\032!.hbase.pb.DeleteNamespaceRespo" + - "nse\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb" + - ".GetNamespaceDescriptorRequest\032(.hbase.p" + - "b.GetNamespaceDescriptorResponse\022q\n\030List" + - "NamespaceDescriptors\022).hbase.pb.ListName", - "spaceDescriptorsRequest\032*.hbase.pb.ListN" + - "amespaceDescriptorsResponse\022\206\001\n\037ListTabl" + - "eDescriptorsByNamespace\0220.hbase.pb.ListT" + - "ableDescriptorsByNamespaceRequest\0321.hbas" + - "e.pb.ListTableDescriptorsByNamespaceResp" + - "onse\022t\n\031ListTableNamesByNamespace\022*.hbas" + - "e.pb.ListTableNamesByNamespaceRequest\032+." + - "hbase.pb.ListTableNamesByNamespaceRespon" + - "se\022P\n\rGetTableState\022\036.hbase.pb.GetTableS" + - "tateRequest\032\037.hbase.pb.GetTableStateResp", - "onse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaRequ" + - "est\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLa" + - "stMajorCompactionTimestamp\022).hbase.pb.Ma" + - "jorCompactionTimestampRequest\032*.hbase.pb" + - ".MajorCompactionTimestampResponse\022\212\001\n(ge" + - "tLastMajorCompactionTimestampForRegion\0222" + - ".hbase.pb.MajorCompactionTimestampForReg" + - "ionRequest\032*.hbase.pb.MajorCompactionTim" + - "estampResponse\022_\n\022getProcedureResult\022#.h" + - "base.pb.GetProcedureResultRequest\032$.hbas", - "e.pb.GetProcedureResultResponse\022h\n\027getSe" + - "curityCapabilities\022%.hbase.pb.SecurityCa" + - "pabilitiesRequest\032&.hbase.pb.SecurityCap" + - "abilitiesResponse\022S\n\016AbortProcedure\022\037.hb" + - "ase.pb.AbortProcedureRequest\032 .hbase.pb." 
+ - "AbortProcedureResponse\022S\n\016ListProcedures" + - "\022\037.hbase.pb.ListProceduresRequest\032 .hbas" + - "e.pb.ListProceduresResponseBI\n1org.apach" + - "e.hadoop.hbase.shaded.protobuf.generated" + - "B\014MasterProtosH\001\210\001\001\240\001\001" + "e.pb.ThrottleRequest\0220\n\013space_limit\030\010 \001(" + + "\0132\033.hbase.pb.SpaceLimitRequest\"\022\n\020SetQuo" + + "taResponse\"J\n\037MajorCompactionTimestampRe" + + "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" + + "leName\"U\n(MajorCompactionTimestampForReg" + + "ionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + + "gionSpecifier\"@\n MajorCompactionTimestam", + "pResponse\022\034\n\024compaction_timestamp\030\001 \002(\003\"" + + "\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034Secur" + + "ityCapabilitiesResponse\022G\n\014capabilities\030" + + "\001 \003(\01621.hbase.pb.SecurityCapabilitiesRes" + + "ponse.Capability\"\202\001\n\nCapability\022\031\n\025SIMPL" + + "E_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTICAT" + + "ION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORI" + + "ZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterS" + + "witchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMast" + + "erService\022e\n\024GetSchemaAlterStatus\022%.hbas", + "e.pb.GetSchemaAlterStatusRequest\032&.hbase" + + ".pb.GetSchemaAlterStatusResponse\022b\n\023GetT" + + "ableDescriptors\022$.hbase.pb.GetTableDescr" + + "iptorsRequest\032%.hbase.pb.GetTableDescrip" + + "torsResponse\022P\n\rGetTableNames\022\036.hbase.pb" + + ".GetTableNamesRequest\032\037.hbase.pb.GetTabl" + + "eNamesResponse\022Y\n\020GetClusterStatus\022!.hba" + + "se.pb.GetClusterStatusRequest\032\".hbase.pb" + + ".GetClusterStatusResponse\022V\n\017IsMasterRun" + + "ning\022 .hbase.pb.IsMasterRunningRequest\032!", + ".hbase.pb.IsMasterRunningResponse\022D\n\tAdd" + + "Column\022\032.hbase.pb.AddColumnRequest\032\033.hba" + + "se.pb.AddColumnResponse\022M\n\014DeleteColumn\022" + + "\035.hbase.pb.DeleteColumnRequest\032\036.hbase.p" + + "b.DeleteColumnResponse\022M\n\014ModifyColumn\022\035" + + ".hbase.pb.ModifyColumnRequest\032\036.hbase.pb" + + ".ModifyColumnResponse\022G\n\nMoveRegion\022\033.hb" + + "ase.pb.MoveRegionRequest\032\034.hbase.pb.Move" + + "RegionResponse\022k\n\026DispatchMergingRegions" + + "\022\'.hbase.pb.DispatchMergingRegionsReques", + "t\032(.hbase.pb.DispatchMergingRegionsRespo" + + "nse\022M\n\014AssignRegion\022\035.hbase.pb.AssignReg" + + "ionRequest\032\036.hbase.pb.AssignRegionRespon" + + "se\022S\n\016UnassignRegion\022\037.hbase.pb.Unassign" + + "RegionRequest\032 .hbase.pb.UnassignRegionR" + + "esponse\022P\n\rOfflineRegion\022\036.hbase.pb.Offl" + + "ineRegionRequest\032\037.hbase.pb.OfflineRegio" + + "nResponse\022J\n\013DeleteTable\022\034.hbase.pb.Dele" + + "teTableRequest\032\035.hbase.pb.DeleteTableRes" + + "ponse\022P\n\rtruncateTable\022\036.hbase.pb.Trunca", + "teTableRequest\032\037.hbase.pb.TruncateTableR" + + "esponse\022J\n\013EnableTable\022\034.hbase.pb.Enable" + + "TableRequest\032\035.hbase.pb.EnableTableRespo" + + "nse\022M\n\014DisableTable\022\035.hbase.pb.DisableTa" + + "bleRequest\032\036.hbase.pb.DisableTableRespon" + + "se\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTable" + + "Request\032\035.hbase.pb.ModifyTableResponse\022J" + + "\n\013CreateTable\022\034.hbase.pb.CreateTableRequ" + + "est\032\035.hbase.pb.CreateTableResponse\022A\n\010Sh" + + "utdown\022\031.hbase.pb.ShutdownRequest\032\032.hbas", + 
"e.pb.ShutdownResponse\022G\n\nStopMaster\022\033.hb" + + "ase.pb.StopMasterRequest\032\034.hbase.pb.Stop" + + "MasterResponse\022h\n\031IsMasterInMaintenanceM" + + "ode\022$.hbase.pb.IsInMaintenanceModeReques" + + "t\032%.hbase.pb.IsInMaintenanceModeResponse" + + "\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031." + + "hbase.pb.BalanceResponse\022_\n\022SetBalancerR" + + "unning\022#.hbase.pb.SetBalancerRunningRequ" + + "est\032$.hbase.pb.SetBalancerRunningRespons" + + "e\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBala", + "ncerEnabledRequest\032#.hbase.pb.IsBalancer" + + "EnabledResponse\022k\n\026SetSplitOrMergeEnable" + + "d\022\'.hbase.pb.SetSplitOrMergeEnabledReque" + + "st\032(.hbase.pb.SetSplitOrMergeEnabledResp" + + "onse\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb" + + ".IsSplitOrMergeEnabledRequest\032\'.hbase.pb" + + ".IsSplitOrMergeEnabledResponse\022D\n\tNormal" + + "ize\022\032.hbase.pb.NormalizeRequest\032\033.hbase." + + "pb.NormalizeResponse\022e\n\024SetNormalizerRun" + + "ning\022%.hbase.pb.SetNormalizerRunningRequ", + "est\032&.hbase.pb.SetNormalizerRunningRespo" + + "nse\022b\n\023IsNormalizerEnabled\022$.hbase.pb.Is" + + "NormalizerEnabledRequest\032%.hbase.pb.IsNo" + + "rmalizerEnabledResponse\022S\n\016RunCatalogSca" + + "n\022\037.hbase.pb.RunCatalogScanRequest\032 .hba" + + "se.pb.RunCatalogScanResponse\022e\n\024EnableCa" + + "talogJanitor\022%.hbase.pb.EnableCatalogJan" + + "itorRequest\032&.hbase.pb.EnableCatalogJani" + + "torResponse\022n\n\027IsCatalogJanitorEnabled\022(" + + ".hbase.pb.IsCatalogJanitorEnabledRequest", + "\032).hbase.pb.IsCatalogJanitorEnabledRespo" + + "nse\022^\n\021ExecMasterService\022#.hbase.pb.Copr" + + "ocessorServiceRequest\032$.hbase.pb.Coproce" + + "ssorServiceResponse\022A\n\010Snapshot\022\031.hbase." + + "pb.SnapshotRequest\032\032.hbase.pb.SnapshotRe" + + "sponse\022h\n\025GetCompletedSnapshots\022&.hbase." + + "pb.GetCompletedSnapshotsRequest\032\'.hbase." + + "pb.GetCompletedSnapshotsResponse\022S\n\016Dele" + + "teSnapshot\022\037.hbase.pb.DeleteSnapshotRequ" + + "est\032 .hbase.pb.DeleteSnapshotResponse\022S\n", + "\016IsSnapshotDone\022\037.hbase.pb.IsSnapshotDon" + + "eRequest\032 .hbase.pb.IsSnapshotDoneRespon" + + "se\022V\n\017RestoreSnapshot\022 .hbase.pb.Restore" + + "SnapshotRequest\032!.hbase.pb.RestoreSnapsh" + + "otResponse\022P\n\rExecProcedure\022\036.hbase.pb.E" + + "xecProcedureRequest\032\037.hbase.pb.ExecProce" + + "dureResponse\022W\n\024ExecProcedureWithRet\022\036.h" + + "base.pb.ExecProcedureRequest\032\037.hbase.pb." 
+ + "ExecProcedureResponse\022V\n\017IsProcedureDone" + + "\022 .hbase.pb.IsProcedureDoneRequest\032!.hba", + "se.pb.IsProcedureDoneResponse\022V\n\017ModifyN" + + "amespace\022 .hbase.pb.ModifyNamespaceReque" + + "st\032!.hbase.pb.ModifyNamespaceResponse\022V\n" + + "\017CreateNamespace\022 .hbase.pb.CreateNamesp" + + "aceRequest\032!.hbase.pb.CreateNamespaceRes" + + "ponse\022V\n\017DeleteNamespace\022 .hbase.pb.Dele" + + "teNamespaceRequest\032!.hbase.pb.DeleteName" + + "spaceResponse\022k\n\026GetNamespaceDescriptor\022" + + "\'.hbase.pb.GetNamespaceDescriptorRequest" + + "\032(.hbase.pb.GetNamespaceDescriptorRespon", + "se\022q\n\030ListNamespaceDescriptors\022).hbase.p" + + "b.ListNamespaceDescriptorsRequest\032*.hbas" + + "e.pb.ListNamespaceDescriptorsResponse\022\206\001" + + "\n\037ListTableDescriptorsByNamespace\0220.hbas" + + "e.pb.ListTableDescriptorsByNamespaceRequ" + + "est\0321.hbase.pb.ListTableDescriptorsByNam" + + "espaceResponse\022t\n\031ListTableNamesByNamesp" + + "ace\022*.hbase.pb.ListTableNamesByNamespace" + + "Request\032+.hbase.pb.ListTableNamesByNames" + + "paceResponse\022P\n\rGetTableState\022\036.hbase.pb", + ".GetTableStateRequest\032\037.hbase.pb.GetTabl" + + "eStateResponse\022A\n\010SetQuota\022\031.hbase.pb.Se" + + "tQuotaRequest\032\032.hbase.pb.SetQuotaRespons" + + "e\022x\n\037getLastMajorCompactionTimestamp\022).h" + + "base.pb.MajorCompactionTimestampRequest\032" + + "*.hbase.pb.MajorCompactionTimestampRespo" + + "nse\022\212\001\n(getLastMajorCompactionTimestampF" + + "orRegion\0222.hbase.pb.MajorCompactionTimes" + + "tampForRegionRequest\032*.hbase.pb.MajorCom" + + "pactionTimestampResponse\022_\n\022getProcedure", + "Result\022#.hbase.pb.GetProcedureResultRequ" + + "est\032$.hbase.pb.GetProcedureResultRespons" + + "e\022h\n\027getSecurityCapabilities\022%.hbase.pb." + + "SecurityCapabilitiesRequest\032&.hbase.pb.S" + + "ecurityCapabilitiesResponse\022S\n\016AbortProc" + + "edure\022\037.hbase.pb.AbortProcedureRequest\032 " + + ".hbase.pb.AbortProcedureResponse\022S\n\016List" + + "Procedures\022\037.hbase.pb.ListProceduresRequ" + + "est\032 .hbase.pb.ListProceduresResponseBI\n" + + "1org.apache.hadoop.hbase.shaded.protobuf", + ".generatedB\014MasterProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -70377,7 +70577,7 @@ public final class MasterProtos { internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, - new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); + new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", "SpaceLimit", }); internal_static_hbase_pb_SetQuotaResponse_descriptor = getDescriptor().getMessageTypes().get(105); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java index d14336a..a715115 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java @@ -239,12 +239,20 @@ public final class QuotaProtos { * THROTTLE = 1; */ THROTTLE(1), + /** + * SPACE = 2; + */ + SPACE(2), ; /** * THROTTLE = 1; */ public static final int THROTTLE_VALUE = 1; + /** + * SPACE = 2; + */ + public static final int SPACE_VALUE = 2; public final int getNumber() { @@ -262,6 +270,7 @@ public final class QuotaProtos { public static QuotaType forNumber(int value) { switch (value) { case 1: return THROTTLE; + case 2: return SPACE; default: return null; } } @@ -311,6 +320,150 @@ public final class QuotaProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType) } + /** + *
+   * Defines what action should be taken when the SpaceQuota is violated
+   * </pre>
+   *
+   * Protobuf enum {@code hbase.pb.SpaceViolationPolicy}
+   */
+  public enum SpaceViolationPolicy
+      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <pre>
+     * Disable the table(s)
+     * </pre>
+     *
+     * <code>DISABLE = 1;</code>
+     */
+    DISABLE(1),
+    /**
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+     *
+     * <code>NO_WRITES_COMPACTIONS = 2;</code>
+     */
+    NO_WRITES_COMPACTIONS(2),
+    /**
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+     *
+     * <code>NO_WRITES = 3;</code>
+     */
+    NO_WRITES(3),
+    /**
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+     *
+     * <code>NO_INSERTS = 4;</code>
+     */
+    NO_INSERTS(4),
+    ;
+
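+    // The values above correspond to the new SpaceViolationPolicy enum that this
+    // patch adds to Quota.proto; roughly (a sketch inferred from the generated
+    // constants, not the literal .proto hunk):
+    //
+    //   enum SpaceViolationPolicy {
+    //     DISABLE = 1;                // Disable the table(s)
+    //     NO_WRITES_COMPACTIONS = 2;  // No writes, bulk-loads, or compactions
+    //     NO_WRITES = 3;              // No writes or bulk-loads
+    //     NO_INSERTS = 4;             // No puts or bulk-loads; deletes allowed
+    //   }
+    /**
+     * <pre>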
+     * Disable the table(s)
+     * </pre>
+     *
+     * <code>DISABLE = 1;</code>
+     */
+    public static final int DISABLE_VALUE = 1;
+    /**
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+     *
+     * <code>NO_WRITES_COMPACTIONS = 2;</code>
+     */
+    public static final int NO_WRITES_COMPACTIONS_VALUE = 2;
+    /**
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+     *
+     * <code>NO_WRITES = 3;</code>
+     */
+    public static final int NO_WRITES_VALUE = 3;
+    /**
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+ * + * NO_INSERTS = 4; + */ + public static final int NO_INSERTS_VALUE = 4; + + + public final int getNumber() { + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SpaceViolationPolicy valueOf(int value) { + return forNumber(value); + } + + public static SpaceViolationPolicy forNumber(int value) { + switch (value) { + case 1: return DISABLE; + case 2: return NO_WRITES_COMPACTIONS; + case 3: return NO_WRITES; + case 4: return NO_INSERTS; + default: return null; + } + } + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap< + SpaceViolationPolicy> internalValueMap = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap() { + public SpaceViolationPolicy findValueByNumber(int number) { + return SpaceViolationPolicy.forNumber(number); + } + }; + + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3); + } + + private static final SpaceViolationPolicy[] VALUES = values(); + + public static SpaceViolationPolicy valueOf( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private SpaceViolationPolicy(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.SpaceViolationPolicy) + } + public interface TimedQuotaOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.TimedQuota) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -3419,6 +3572,19 @@ public final class QuotaProtos { * optional .hbase.pb.Throttle throttle = 2; */ org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleOrBuilder getThrottleOrBuilder(); + + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + boolean hasSpace(); + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getSpace(); + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder(); } /** * Protobuf type {@code hbase.pb.Quotas} @@ -3481,6 +3647,19 @@ public final class QuotaProtos { bitField0_ |= 0x00000002; break; } + case 26: { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = space_.toBuilder(); + } + space_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(space_); + space_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + 
break; + } } } } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { @@ -3542,6 +3721,27 @@ public final class QuotaProtos { return throttle_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance() : throttle_; } + public static final int SPACE_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota space_; + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public boolean hasSpace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getSpace() { + return space_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() { + return space_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_; + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; @@ -3566,6 +3766,9 @@ public final class QuotaProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, getThrottle()); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, getSpace()); + } unknownFields.writeTo(output); } @@ -3582,6 +3785,10 @@ public final class QuotaProtos { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeMessageSize(2, getThrottle()); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getSpace()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -3609,6 +3816,11 @@ public final class QuotaProtos { result = result && getThrottle() .equals(other.getThrottle()); } + result = result && (hasSpace() == other.hasSpace()); + if (hasSpace()) { + result = result && getSpace() + .equals(other.getSpace()); + } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -3629,6 +3841,10 @@ public final class QuotaProtos { hash = (37 * hash) + THROTTLE_FIELD_NUMBER; hash = (53 * hash) + getThrottle().hashCode(); } + if (hasSpace()) { + hash = (37 * hash) + SPACE_FIELD_NUMBER; + hash = (53 * hash) + getSpace().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -3744,6 +3960,7 @@ public final class QuotaProtos { if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getThrottleFieldBuilder(); + getSpaceFieldBuilder(); } } public Builder clear() { @@ -3756,6 +3973,12 @@ public final class QuotaProtos { throttleBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); + if (spaceBuilder_ == null) { + space_ = null; + } else { + spaceBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -3792,6 +4015,14 @@ public final class QuotaProtos { } else { result.throttle_ = throttleBuilder_.build(); } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (spaceBuilder_ == null) { + result.space_ = space_; + } else { + result.space_ = spaceBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); 
return result; @@ -3840,6 +4071,9 @@ public final class QuotaProtos { if (other.hasThrottle()) { mergeThrottle(other.getThrottle()); } + if (other.hasSpace()) { + mergeSpace(other.getSpace()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -4022,6 +4256,124 @@ public final class QuotaProtos { } return throttleBuilder_; } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota space_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> spaceBuilder_; + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public boolean hasSpace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getSpace() { + if (spaceBuilder_ == null) { + return space_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_; + } else { + return spaceBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder setSpace(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (spaceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + space_ = value; + onChanged(); + } else { + spaceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder setSpace( + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) { + if (spaceBuilder_ == null) { + space_ = builderForValue.build(); + onChanged(); + } else { + spaceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder mergeSpace(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (spaceBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + space_ != null && + space_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) { + space_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(space_).mergeFrom(value).buildPartial(); + } else { + space_ = value; + } + onChanged(); + } else { + spaceBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder clearSpace() { + if (spaceBuilder_ == null) { + space_ = null; + onChanged(); + } else { + spaceBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder getSpaceBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getSpaceFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() { + if (spaceBuilder_ != null) { + return spaceBuilder_.getMessageOrBuilder(); + } else { + return space_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_; + } + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> + getSpaceFieldBuilder() { + if (spaceBuilder_ == null) { + spaceBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>( + getSpace(), + getParentForChildren(), + isClean()); + space_ = null; + } + return spaceBuilder_; + } public final Builder setUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); @@ -4444,75 +4796,1314 @@ public final class QuotaProtos { } - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_TimedQuota_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_TimedQuota_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_Throttle_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_Throttle_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ThrottleRequest_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_Quotas_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_Quotas_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_QuotaUsage_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_QuotaUsage_fieldAccessorTable; + public interface SpaceQuotaOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.SpaceQuota) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; + /** + *
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    boolean hasSoftLimit();
+    /**
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    long getSoftLimit();
+
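+    // Together with violation_policy below, this is the shape of the new
+    // SpaceQuota message; roughly (a sketch inferred from the generated
+    // accessors, not the literal .proto hunk):
+    //
+    //   message SpaceQuota {
+    //     optional uint64 soft_limit = 1;                      // in bytes
+    //     optional SpaceViolationPolicy violation_policy = 2;
+    //   }
+    //
+    // The existing Quotas message gains "optional SpaceQuota space = 3;" and the
+    // new SpaceLimitRequest wraps a single "optional SpaceQuota quota = 1;".
+    /**
+     * <pre>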
+     * The action to take when the quota is violated
+     * </pre>
+     *
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     */
+    boolean hasViolationPolicy();
+    /**
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+ * + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy(); } - private static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" + - "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" + - "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" + - "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" + - "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" + - "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" + - "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" + - "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" + - " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" + - " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030", - "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" + - "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" + - "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" + - "edQuota\"M\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" + - "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" + - "rottle\"\014\n\nQuotaUsage*&\n\nQuotaScope\022\013\n\007CL" + - "USTER\020\001\022\013\n\007MACHINE\020\002*v\n\014ThrottleType\022\022\n\016" + - "REQUEST_NUMBER\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WR" + - "ITE_NUMBER\020\003\022\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUM" + - "BER\020\005\022\r\n\tREAD_SIZE\020\006*\031\n\tQuotaType\022\014\n\010THR", - "OTTLE\020\001BH\n1org.apache.hadoop.hbase.shade" + - "d.protobuf.generatedB\013QuotaProtosH\001\210\001\001\240\001" + - "\001" - }; - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { - public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - return null; - } - }; - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] { + /** + *
+   * Defines a limit on the amount of filesystem space used by a table/namespace
+   * </pre>
+ * + * Protobuf type {@code hbase.pb.SpaceQuota} + */ + public static final class SpaceQuota extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.SpaceQuota) + SpaceQuotaOrBuilder { + // Use SpaceQuota.newBuilder() to construct. + private SpaceQuota(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SpaceQuota() { + softLimit_ = 0L; + violationPolicy_ = 1; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SpaceQuota( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + softLimit_ = input.readUInt64(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy value = org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + violationPolicy_ = rawValue; + } + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class); + } + + private int bitField0_; + public static final int SOFT_LIMIT_FIELD_NUMBER = 1; + private long softLimit_; + /** + *
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    public boolean hasSoftLimit() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    public long getSoftLimit() {
+      return softLimit_;
+    }
+
+    public static final int VIOLATION_POLICY_FIELD_NUMBER = 2;
+    private int violationPolicy_;
+    /**
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     *
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     */
+    public boolean hasViolationPolicy() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+ * + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy result = org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(violationPolicy_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE : result; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, softLimit_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, violationPolicy_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, softLimit_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeEnumSize(2, violationPolicy_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota other = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota) obj; + + boolean result = true; + result = result && (hasSoftLimit() == other.hasSoftLimit()); + if (hasSoftLimit()) { + result = result && (getSoftLimit() + == other.getSoftLimit()); + } + result = result && (hasViolationPolicy() == other.hasViolationPolicy()); + if (hasViolationPolicy()) { + result = result && violationPolicy_ == other.violationPolicy_; + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSoftLimit()) { + hash = (37 * hash) + SOFT_LIMIT_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getSoftLimit()); + } + if (hasViolationPolicy()) { + hash = (37 * hash) + VIOLATION_POLICY_FIELD_NUMBER; + hash = (53 * hash) + violationPolicy_; + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     * Defines a limit on the amount of filesystem space used by a table/namespace
+     * </pre>
+ * + * Protobuf type {@code hbase.pb.SpaceQuota} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.SpaceQuota) + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + softLimit_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + violationPolicy_ = 1; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota result = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.softLimit_ = softLimit_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.violationPolicy_ = violationPolicy_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor 
field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) return this; + if (other.hasSoftLimit()) { + setSoftLimit(other.getSoftLimit()); + } + if (other.hasViolationPolicy()) { + setViolationPolicy(other.getViolationPolicy()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private long softLimit_ ; + /** + *
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public boolean hasSoftLimit() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public long getSoftLimit() {
+        return softLimit_;
+      }
+      /**
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public Builder setSoftLimit(long value) {
+        bitField0_ |= 0x00000001;
+        softLimit_ = value;
+        onChanged();
+        return this;
+      }
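+      // Illustrative builder usage (a sketch; the limit shown is hypothetical):
+      //
+      //   SpaceQuota quota = SpaceQuota.newBuilder()
+      //       .setSoftLimit(1024L * 1024 * 1024)  // a 1GB limit, in bytes
+      //       .setViolationPolicy(SpaceViolationPolicy.NO_WRITES)
+      //       .build();
+      /**
+       * <pre>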
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public Builder clearSoftLimit() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        softLimit_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      private int violationPolicy_ = 1;
+      /**
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       *
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       */
+      public boolean hasViolationPolicy() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       *
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy result = org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(violationPolicy_);
+        return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE : result;
+      }
+      /**
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       *
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       */
+      public Builder setViolationPolicy(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        violationPolicy_ = value.getNumber();
+        onChanged();
+        return this;
+      }
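+      // Client code is not expected to drive these protobuf builders directly;
+      // the new QuotaSettingsFactory methods do it. A hypothetical caller (the
+      // table name and size are illustrative only; the SpaceViolationPolicy here
+      // is the org.apache.hadoop.hbase.quotas enum, not this protobuf one):
+      //
+      //   QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
+      //       TableName.valueOf("example_table"),
+      //       1024L * 1024 * 1024,              // 1GB, in bytes
+      //       SpaceViolationPolicy.NO_WRITES);
+      //   admin.setQuota(settings);
+      /**
+       * <pre>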
+       * The action to take when the quota is violated
+       * </pre>
+ * + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + */ + public Builder clearViolationPolicy() { + bitField0_ = (bitField0_ & ~0x00000002); + violationPolicy_ = 1; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceQuota) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SpaceQuota) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public SpaceQuota parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new SpaceQuota(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface SpaceLimitRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.SpaceLimitRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + boolean hasQuota(); + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getQuota(); + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder(); + } + /** + *
+   * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+   * </pre>
+ * + * Protobuf type {@code hbase.pb.SpaceLimitRequest} + */ + public static final class SpaceLimitRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.SpaceLimitRequest) + SpaceLimitRequestOrBuilder { + // Use SpaceLimitRequest.newBuilder() to construct. + private SpaceLimitRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SpaceLimitRequest() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SpaceLimitRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = quota_.toBuilder(); + } + quota_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(quota_); + quota_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class); + } + + private int bitField0_; + public static final int QUOTA_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota quota_; + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public boolean hasQuota() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getQuota() { + return 
quota_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() { + return quota_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getQuota()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getQuota()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest) obj; + + boolean result = true; + result = result && (hasQuota() == other.hasQuota()); + if (hasQuota()) { + result = result && getQuota() + .equals(other.getQuota()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasQuota()) { + hash = (37 * hash) + QUOTA_FIELD_NUMBER; + hash = (53 * hash) + getQuota().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + byte[] data, + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+     * </pre>
+ * + * Protobuf type {@code hbase.pb.SpaceLimitRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.SpaceLimitRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getQuotaFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (quotaBuilder_ == null) { + quota_ = null; + } else { + quotaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (quotaBuilder_ == null) { + result.quota_ = quota_; + } else { + result.quota_ = quotaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance()) return this; + if (other.hasQuota()) { + mergeQuota(other.getQuota()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota quota_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> quotaBuilder_; + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public boolean hasQuota() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getQuota() { + if (quotaBuilder_ == null) { + return quota_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_; + } else { + return quotaBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder setQuota(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (quotaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + quota_ = value; + onChanged(); + } else { + quotaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder setQuota( + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) { + if (quotaBuilder_ == null) { + quota_ = builderForValue.build(); + onChanged(); + } else { + quotaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder mergeQuota(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (quotaBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + quota_ != null && + quota_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) { + quota_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(quota_).mergeFrom(value).buildPartial(); + } else { + quota_ = value; + } + onChanged(); + } else { + quotaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder clearQuota() { + if (quotaBuilder_ == null) { + quota_ = null; + onChanged(); + } else { + quotaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder getQuotaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getQuotaFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() { + if (quotaBuilder_ != null) { + return quotaBuilder_.getMessageOrBuilder(); + } else { + return quota_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_; + } + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> + getQuotaFieldBuilder() { + if (quotaBuilder_ == null) { + quotaBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>( + getQuota(), + getParentForChildren(), + isClean()); + quota_ = null; + } + return quotaBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceLimitRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SpaceLimitRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public SpaceLimitRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new SpaceLimitRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TimedQuota_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_TimedQuota_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_Throttle_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_Throttle_fieldAccessorTable; + private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ThrottleRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_Quotas_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_Quotas_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_QuotaUsage_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_QuotaUsage_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SpaceQuota_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_SpaceQuota_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SpaceLimitRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" + + "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" + + "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" + + "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" + + "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" + + "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" + + "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" + + "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" + + " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" + + " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030", + "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" + + "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" + + "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" + + "edQuota\"r\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" + + "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" + + "rottle\022#\n\005space\030\003 \001(\0132\024.hbase.pb.SpaceQu" + + "ota\"\014\n\nQuotaUsage\"Z\n\nSpaceQuota\022\022\n\nsoft_" + + "limit\030\001 \001(\004\0228\n\020violation_policy\030\002 \001(\0162\036." 
+ + "hbase.pb.SpaceViolationPolicy\"8\n\021SpaceLi" + + "mitRequest\022#\n\005quota\030\001 \001(\0132\024.hbase.pb.Spa", + "ceQuota*&\n\nQuotaScope\022\013\n\007CLUSTER\020\001\022\013\n\007MA" + + "CHINE\020\002*v\n\014ThrottleType\022\022\n\016REQUEST_NUMBE" + + "R\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WRITE_NUMBER\020\003\022" + + "\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUMBER\020\005\022\r\n\tREAD" + + "_SIZE\020\006*$\n\tQuotaType\022\014\n\010THROTTLE\020\001\022\t\n\005SP" + + "ACE\020\002*]\n\024SpaceViolationPolicy\022\013\n\007DISABLE" + + "\020\001\022\031\n\025NO_WRITES_COMPACTIONS\020\002\022\r\n\tNO_WRIT" + + "ES\020\003\022\016\n\nNO_INSERTS\020\004BH\n1org.apache.hadoo" + + "p.hbase.shaded.protobuf.generatedB\013Quota" + + "ProtosH\001\210\001\001\240\001\001" + }; + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(), }, assigner); internal_static_hbase_pb_TimedQuota_descriptor = @@ -4538,13 +6129,25 @@ public final class QuotaProtos { internal_static_hbase_pb_Quotas_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_Quotas_descriptor, - new java.lang.String[] { "BypassGlobals", "Throttle", }); + new java.lang.String[] { "BypassGlobals", "Throttle", "Space", }); internal_static_hbase_pb_QuotaUsage_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_QuotaUsage_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_QuotaUsage_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_SpaceQuota_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hbase_pb_SpaceQuota_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_SpaceQuota_descriptor, + new java.lang.String[] { "SoftLimit", "ViolationPolicy", }); + internal_static_hbase_pb_SpaceLimitRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_SpaceLimitRequest_descriptor, + new java.lang.String[] { "Quota", }); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); } diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto index 9e6d1ed..5d5d7b6 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto @@ -521,6 +521,8 @@ message SetQuotaRequest { optional bool remove_all = 5; optional bool bypass_globals = 6; optional ThrottleRequest throttle 
= 7; + + optional SpaceLimitRequest space_limit = 8; } message SetQuotaResponse { diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto b/hbase-protocol-shaded/src/main/protobuf/Quota.proto index 240c535..b53219a 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto @@ -65,12 +65,33 @@ message ThrottleRequest { enum QuotaType { THROTTLE = 1; + SPACE = 2; } message Quotas { optional bool bypass_globals = 1 [default = false]; optional Throttle throttle = 2; + optional SpaceQuota space = 3; } message QuotaUsage { } + +// Defines what action should be taken when the SpaceQuota is violated +enum SpaceViolationPolicy { + DISABLE = 1; // Disable the table(s) + NO_WRITES_COMPACTIONS = 2; // No writes, bulk-loads, or compactions + NO_WRITES = 3; // No writes or bulk-loads + NO_INSERTS = 4; // No puts or bulk-loads, but deletes are allowed +} + +// Defines a limit on the amount of filesystem space used by a table/namespace +message SpaceQuota { + optional uint64 soft_limit = 1; // The limit of bytes for this quota + optional SpaceViolationPolicy violation_policy = 2; // The action to take when the quota is violated +} + +// The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota). +message SpaceLimitRequest { + optional SpaceQuota quota = 1; +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java index 05894b9..1925828 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java @@ -217,12 +217,20 @@ public final class QuotaProtos { * THROTTLE = 1; */ THROTTLE(0, 1), + /** + * SPACE = 2; + */ + SPACE(1, 2), ; /** * THROTTLE = 1; */ public static final int THROTTLE_VALUE = 1; + /** + * SPACE = 2; + */ + public static final int SPACE_VALUE = 2; public final int getNumber() { return value; } @@ -230,6 +238,7 @@ public final class QuotaProtos { public static QuotaType valueOf(int value) { switch (value) { case 1: return THROTTLE; + case 2: return SPACE; default: return null; } } @@ -281,6 +290,142 @@ public final class QuotaProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType) } + /** + * Protobuf enum {@code hbase.pb.SpaceViolationPolicy} + * + *
+   * <pre>
+   * Defines what action should be taken when the SpaceQuota is violated
+   * </pre>
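+   *
+   * An illustrative sketch (not protoc output; the numeric value is just an
+   * example) of mapping between the wire value and the enum constant with the
+   * generated accessors:
+   *
+   * <pre>
+   * SpaceViolationPolicy policy = SpaceViolationPolicy.valueOf(3); // NO_WRITES
+   * int wireValue = policy.getNumber();                           // 3
+   * </pre>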
+ */ + public enum SpaceViolationPolicy + implements com.google.protobuf.ProtocolMessageEnum { + /** + * DISABLE = 1; + * + *
+     * <pre>
+     * Disable the table(s)
+     * </pre>
+ */ + DISABLE(0, 1), + /** + * NO_WRITES_COMPACTIONS = 2; + * + *
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+ */ + NO_WRITES_COMPACTIONS(1, 2), + /** + * NO_WRITES = 3; + * + *
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+ */ + NO_WRITES(2, 3), + /** + * NO_INSERTS = 4; + * + *
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+ */ + NO_INSERTS(3, 4), + ; + + /** + * DISABLE = 1; + * + *
+     * <pre>
+     * Disable the table(s)
+     * </pre>
+ */ + public static final int DISABLE_VALUE = 1; + /** + * NO_WRITES_COMPACTIONS = 2; + * + *
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+ */ + public static final int NO_WRITES_COMPACTIONS_VALUE = 2; + /** + * NO_WRITES = 3; + * + *
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+ */ + public static final int NO_WRITES_VALUE = 3; + /** + * NO_INSERTS = 4; + * + *
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+ */ + public static final int NO_INSERTS_VALUE = 4; + + + public final int getNumber() { return value; } + + public static SpaceViolationPolicy valueOf(int value) { + switch (value) { + case 1: return DISABLE; + case 2: return NO_WRITES_COMPACTIONS; + case 3: return NO_WRITES; + case 4: return NO_INSERTS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public SpaceViolationPolicy findValueByNumber(int number) { + return SpaceViolationPolicy.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3); + } + + private static final SpaceViolationPolicy[] VALUES = values(); + + public static SpaceViolationPolicy valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private SpaceViolationPolicy(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.SpaceViolationPolicy) + } + public interface TimedQuotaOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -3315,6 +3460,20 @@ public final class QuotaProtos { * optional .hbase.pb.Throttle throttle = 2; */ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder getThrottleOrBuilder(); + + // optional .hbase.pb.SpaceQuota space = 3; + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + boolean hasSpace(); + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getSpace(); + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder(); } /** * Protobuf type {@code hbase.pb.Quotas} @@ -3385,6 +3544,19 @@ public final class QuotaProtos { bitField0_ |= 0x00000002; break; } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = space_.toBuilder(); + } + space_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(space_); + space_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -3463,9 +3635,32 @@ public final class QuotaProtos { return throttle_; } + // optional .hbase.pb.SpaceQuota space = 3; + public static final int SPACE_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota space_; + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public boolean hasSpace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional 
.hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getSpace() { + return space_; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() { + return space_; + } + private void initFields() { bypassGlobals_ = false; throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance(); + space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3491,6 +3686,9 @@ public final class QuotaProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, throttle_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, space_); + } getUnknownFields().writeTo(output); } @@ -3508,6 +3706,10 @@ public final class QuotaProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, throttle_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, space_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -3541,6 +3743,11 @@ public final class QuotaProtos { result = result && getThrottle() .equals(other.getThrottle()); } + result = result && (hasSpace() == other.hasSpace()); + if (hasSpace()) { + result = result && getSpace() + .equals(other.getSpace()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3562,6 +3769,10 @@ public final class QuotaProtos { hash = (37 * hash) + THROTTLE_FIELD_NUMBER; hash = (53 * hash) + getThrottle().hashCode(); } + if (hasSpace()) { + hash = (37 * hash) + SPACE_FIELD_NUMBER; + hash = (53 * hash) + getSpace().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -3664,6 +3875,7 @@ public final class QuotaProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getThrottleFieldBuilder(); + getSpaceFieldBuilder(); } } private static Builder create() { @@ -3680,6 +3892,12 @@ public final class QuotaProtos { throttleBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); + if (spaceBuilder_ == null) { + space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + } else { + spaceBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -3720,6 +3938,14 @@ public final class QuotaProtos { } else { result.throttle_ = throttleBuilder_.build(); } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (spaceBuilder_ == null) { + result.space_ = space_; + } else { + result.space_ = spaceBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3742,6 +3968,9 @@ public final class QuotaProtos { if (other.hasThrottle()) { mergeThrottle(other.getThrottle()); } + if (other.hasSpace()) { + mergeSpace(other.getSpace()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -3925,6 +4154,123 @@ public final class QuotaProtos { return throttleBuilder_; } + // optional .hbase.pb.SpaceQuota space = 3; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + private 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> spaceBuilder_; + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public boolean hasSpace() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getSpace() { + if (spaceBuilder_ == null) { + return space_; + } else { + return spaceBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder setSpace(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (spaceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + space_ = value; + onChanged(); + } else { + spaceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder setSpace( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) { + if (spaceBuilder_ == null) { + space_ = builderForValue.build(); + onChanged(); + } else { + spaceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder mergeSpace(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (spaceBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + space_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) { + space_ = + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(space_).mergeFrom(value).buildPartial(); + } else { + space_ = value; + } + onChanged(); + } else { + spaceBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public Builder clearSpace() { + if (spaceBuilder_ == null) { + space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + onChanged(); + } else { + spaceBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder getSpaceBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getSpaceFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() { + if (spaceBuilder_ != null) { + return spaceBuilder_.getMessageOrBuilder(); + } else { + return space_; + } + } + /** + * optional .hbase.pb.SpaceQuota space = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> + getSpaceFieldBuilder() { + if (spaceBuilder_ == null) { + spaceBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>( + space_, + getParentForChildren(), + isClean()); + space_ = null; + } + return spaceBuilder_; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.Quotas) } @@ -4274,81 +4620,1257 @@ public final class QuotaProtos { // @@protoc_insertion_point(class_scope:hbase.pb.QuotaUsage) } - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_TimedQuota_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_TimedQuota_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_Throttle_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_Throttle_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ThrottleRequest_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_Quotas_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_Quotas_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_QuotaUsage_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_QuotaUsage_fieldAccessorTable; + public interface SpaceQuotaOrBuilder + extends com.google.protobuf.MessageOrBuilder { - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; + // optional uint64 soft_limit = 1; + /** + * optional uint64 soft_limit = 1; + * + *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+ */ + boolean hasSoftLimit(); + /** + * optional uint64 soft_limit = 1; + * + *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+ */ + long getSoftLimit(); + + // optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+ */ + boolean hasViolationPolicy(); + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+ */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy(); } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" + - "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" + - "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" + - "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" + - "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" + - "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" + - "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" + - "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" + - " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" + - " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030", - "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" + - "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" + - "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" + - "edQuota\"M\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" + - "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" + - "rottle\"\014\n\nQuotaUsage*&\n\nQuotaScope\022\013\n\007CL" + - "USTER\020\001\022\013\n\007MACHINE\020\002*v\n\014ThrottleType\022\022\n\016" + - "REQUEST_NUMBER\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WR" + - "ITE_NUMBER\020\003\022\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUM" + - "BER\020\005\022\r\n\tREAD_SIZE\020\006*\031\n\tQuotaType\022\014\n\010THR", - "OTTLE\020\001BA\n*org.apache.hadoop.hbase.proto" + - "buf.generatedB\013QuotaProtosH\001\210\001\001\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_hbase_pb_TimedQuota_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_hbase_pb_TimedQuota_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_TimedQuota_descriptor, - new java.lang.String[] { "TimeUnit", "SoftLimit", "Share", "Scope", }); - internal_static_hbase_pb_Throttle_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_hbase_pb_Throttle_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_Throttle_descriptor, - new java.lang.String[] { "ReqNum", "ReqSize", "WriteNum", "WriteSize", "ReadNum", "ReadSize", }); - internal_static_hbase_pb_ThrottleRequest_descriptor = + /** + * Protobuf type {@code hbase.pb.SpaceQuota} + * + *
+   * <pre>
+   * Defines a limit on the amount of filesystem space used by a table/namespace
+   * </pre>
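+   *
+   * A minimal construction sketch (illustrative; the 100 MB limit and the
+   * chosen policy are arbitrary examples, not defaults):
+   *
+   * <pre>
+   * SpaceQuota quota = SpaceQuota.newBuilder()
+   *     .setSoftLimit(100L * 1024 * 1024)                   // limit in bytes
+   *     .setViolationPolicy(SpaceViolationPolicy.NO_WRITES) // action on violation
+   *     .build();
+   * </pre>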
+ */ + public static final class SpaceQuota extends + com.google.protobuf.GeneratedMessage + implements SpaceQuotaOrBuilder { + // Use SpaceQuota.newBuilder() to construct. + private SpaceQuota(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SpaceQuota(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SpaceQuota defaultInstance; + public static SpaceQuota getDefaultInstance() { + return defaultInstance; + } + + public SpaceQuota getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SpaceQuota( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + softLimit_ = input.readUInt64(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy value = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + violationPolicy_ = value; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SpaceQuota parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SpaceQuota(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint64 soft_limit = 1; + public static final int SOFT_LIMIT_FIELD_NUMBER = 1; + private long softLimit_; + /** + * optional uint64 
soft_limit = 1; + * + *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+ */ + public boolean hasSoftLimit() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 soft_limit = 1; + * + *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+ */ + public long getSoftLimit() { + return softLimit_; + } + + // optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + public static final int VIOLATION_POLICY_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy violationPolicy_; + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+ */ + public boolean hasViolationPolicy() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() { + return violationPolicy_; + } + + private void initFields() { + softLimit_ = 0L; + violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, softLimit_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, violationPolicy_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, softLimit_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, violationPolicy_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota) obj; + + boolean result = true; + result = result && (hasSoftLimit() == other.hasSoftLimit()); + if (hasSoftLimit()) { + result = result && (getSoftLimit() + == other.getSoftLimit()); + } + result = result && (hasViolationPolicy() == other.hasViolationPolicy()); + if (hasViolationPolicy()) { + result = result && + (getViolationPolicy() == other.getViolationPolicy()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSoftLimit()) { + hash = (37 * hash) + SOFT_LIMIT_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSoftLimit()); + } + if (hasViolationPolicy()) { + hash = (37 * hash) + VIOLATION_POLICY_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getViolationPolicy()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SpaceQuota} + * + *
+     * <pre>
+     * Defines a limit on the amount of filesystem space used by a table/namespace
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + softLimit_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota build() { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.softLimit_ = softLimit_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.violationPolicy_ = violationPolicy_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) return this; + if 
(other.hasSoftLimit()) { + setSoftLimit(other.getSoftLimit()); + } + if (other.hasViolationPolicy()) { + setViolationPolicy(other.getViolationPolicy()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint64 soft_limit = 1; + private long softLimit_ ; + /** + * optional uint64 soft_limit = 1; + * + *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+ */ + public boolean hasSoftLimit() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 soft_limit = 1; + * + *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+ */ + public long getSoftLimit() { + return softLimit_; + } + /** + * optional uint64 soft_limit = 1; + * + *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
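+       *
+       * For example (illustrative figure, not a default), a one-gigabyte
+       * limit would be {@code setSoftLimit(1024L * 1024 * 1024)}.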
+ */ + public Builder setSoftLimit(long value) { + bitField0_ |= 0x00000001; + softLimit_ = value; + onChanged(); + return this; + } + /** + * optional uint64 soft_limit = 1; + * + *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+ */ + public Builder clearSoftLimit() { + bitField0_ = (bitField0_ & ~0x00000001); + softLimit_ = 0L; + onChanged(); + return this; + } + + // optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE; + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+ */ + public boolean hasViolationPolicy() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() { + return violationPolicy_; + } + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+ */ + public Builder setViolationPolicy(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + violationPolicy_ = value; + onChanged(); + return this; + } + /** + * optional .hbase.pb.SpaceViolationPolicy violation_policy = 2; + * + *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+ */ + public Builder clearViolationPolicy() { + bitField0_ = (bitField0_ & ~0x00000002); + violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceQuota) + } + + static { + defaultInstance = new SpaceQuota(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SpaceQuota) + } + + public interface SpaceLimitRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.SpaceQuota quota = 1; + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + boolean hasQuota(); + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getQuota(); + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.SpaceLimitRequest} + * + *
+   * <pre>
+   * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+   * </pre>
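+   *
+   * A wrapping sketch (illustrative; {@code quota} is assumed to be a
+   * previously built SpaceQuota):
+   *
+   * <pre>
+   * SpaceLimitRequest request = SpaceLimitRequest.newBuilder()
+   *     .setQuota(quota)
+   *     .build();
+   * </pre>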
+ */ + public static final class SpaceLimitRequest extends + com.google.protobuf.GeneratedMessage + implements SpaceLimitRequestOrBuilder { + // Use SpaceLimitRequest.newBuilder() to construct. + private SpaceLimitRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SpaceLimitRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SpaceLimitRequest defaultInstance; + public static SpaceLimitRequest getDefaultInstance() { + return defaultInstance; + } + + public SpaceLimitRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SpaceLimitRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = quota_.toBuilder(); + } + quota_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(quota_); + quota_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SpaceLimitRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SpaceLimitRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.SpaceQuota quota = 1; + 
public static final int QUOTA_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota quota_; + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public boolean hasQuota() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getQuota() { + return quota_; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() { + return quota_; + } + + private void initFields() { + quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, quota_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, quota_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest) obj; + + boolean result = true; + result = result && (hasQuota() == other.hasQuota()); + if (hasQuota()) { + result = result && getQuota() + .equals(other.getQuota()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasQuota()) { + hash = (37 * hash) + QUOTA_FIELD_NUMBER; + hash = (53 * hash) + getQuota().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SpaceLimitRequest} + * + *
+     * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getQuotaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (quotaBuilder_ == null) { + quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + } else { + quotaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest build() { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (quotaBuilder_ == null) { + result.quota_ = quota_; + } else { + result.quota_ = quotaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance()) return this; + if (other.hasQuota()) { + mergeQuota(other.getQuota()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.SpaceQuota quota = 1; + private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> quotaBuilder_; + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public boolean hasQuota() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getQuota() { + if (quotaBuilder_ == null) { + return quota_; + } else { + return quotaBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder setQuota(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (quotaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + quota_ = value; + onChanged(); + } else { + quotaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder setQuota( + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) { + if (quotaBuilder_ == null) { + quota_ = builderForValue.build(); + onChanged(); + } else { + quotaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder mergeQuota(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) { + if (quotaBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + quota_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) { + quota_ = + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(quota_).mergeFrom(value).buildPartial(); + } else { + quota_ = value; + } + onChanged(); + } else { + quotaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public Builder clearQuota() { + if (quotaBuilder_ == null) { + quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance(); + onChanged(); + } else { + quotaBuilder_.clear(); + } + bitField0_ = (bitField0_ & 
~0x00000001); + return this; + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder getQuotaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getQuotaFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() { + if (quotaBuilder_ != null) { + return quotaBuilder_.getMessageOrBuilder(); + } else { + return quota_; + } + } + /** + * optional .hbase.pb.SpaceQuota quota = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> + getQuotaFieldBuilder() { + if (quotaBuilder_ == null) { + quotaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>( + quota_, + getParentForChildren(), + isClean()); + quota_ = null; + } + return quotaBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceLimitRequest) + } + + static { + defaultInstance = new SpaceLimitRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SpaceLimitRequest) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TimedQuota_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TimedQuota_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_Throttle_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_Throttle_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ThrottleRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_Quotas_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_Quotas_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_QuotaUsage_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_QuotaUsage_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SpaceQuota_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_SpaceQuota_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SpaceLimitRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + 
"\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" + + "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" + + "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" + + "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" + + "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" + + "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" + + "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" + + "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" + + " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" + + " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030", + "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" + + "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" + + "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" + + "edQuota\"r\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" + + "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" + + "rottle\022#\n\005space\030\003 \001(\0132\024.hbase.pb.SpaceQu" + + "ota\"\014\n\nQuotaUsage\"Z\n\nSpaceQuota\022\022\n\nsoft_" + + "limit\030\001 \001(\004\0228\n\020violation_policy\030\002 \001(\0162\036." + + "hbase.pb.SpaceViolationPolicy\"8\n\021SpaceLi" + + "mitRequest\022#\n\005quota\030\001 \001(\0132\024.hbase.pb.Spa", + "ceQuota*&\n\nQuotaScope\022\013\n\007CLUSTER\020\001\022\013\n\007MA" + + "CHINE\020\002*v\n\014ThrottleType\022\022\n\016REQUEST_NUMBE" + + "R\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WRITE_NUMBER\020\003\022" + + "\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUMBER\020\005\022\r\n\tREAD" + + "_SIZE\020\006*$\n\tQuotaType\022\014\n\010THROTTLE\020\001\022\t\n\005SP" + + "ACE\020\002*]\n\024SpaceViolationPolicy\022\013\n\007DISABLE" + + "\020\001\022\031\n\025NO_WRITES_COMPACTIONS\020\002\022\r\n\tNO_WRIT" + + "ES\020\003\022\016\n\nNO_INSERTS\020\004BA\n*org.apache.hadoo" + + "p.hbase.protobuf.generatedB\013QuotaProtosH" + + "\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_TimedQuota_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_TimedQuota_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TimedQuota_descriptor, + new java.lang.String[] { "TimeUnit", "SoftLimit", "Share", "Scope", }); + internal_static_hbase_pb_Throttle_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_Throttle_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_Throttle_descriptor, + new java.lang.String[] { "ReqNum", "ReqSize", "WriteNum", "WriteSize", "ReadNum", "ReadSize", }); + internal_static_hbase_pb_ThrottleRequest_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( @@ -4359,13 +5881,25 @@ public final class QuotaProtos { internal_static_hbase_pb_Quotas_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Quotas_descriptor, - new java.lang.String[] { 
"BypassGlobals", "Throttle", }); + new java.lang.String[] { "BypassGlobals", "Throttle", "Space", }); internal_static_hbase_pb_QuotaUsage_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_QuotaUsage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_QuotaUsage_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_SpaceQuota_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hbase_pb_SpaceQuota_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SpaceQuota_descriptor, + new java.lang.String[] { "SoftLimit", "ViolationPolicy", }); + internal_static_hbase_pb_SpaceLimitRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SpaceLimitRequest_descriptor, + new java.lang.String[] { "Quota", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Quota.proto b/hbase-protocol/src/main/protobuf/Quota.proto index a8303b1..c12b997 100644 --- a/hbase-protocol/src/main/protobuf/Quota.proto +++ b/hbase-protocol/src/main/protobuf/Quota.proto @@ -65,12 +65,33 @@ message ThrottleRequest { enum QuotaType { THROTTLE = 1; + SPACE = 2; } message Quotas { optional bool bypass_globals = 1 [default = false]; optional Throttle throttle = 2; + optional SpaceQuota space = 3; } message QuotaUsage { } + +// Defines what action should be taken when the SpaceQuota is violated +enum SpaceViolationPolicy { + DISABLE = 1; // Disable the table(s) + NO_WRITES_COMPACTIONS = 2; // No writes, bulk-loads, or compactions + NO_WRITES = 3; // No writes or bulk-loads + NO_INSERTS = 4; // No puts or bulk-loads, but deletes are allowed +} + +// Defines a limit on the amount of filesystem space used by a table/namespace +message SpaceQuota { + optional uint64 soft_limit = 1; // The limit of bytes for this quota + optional SpaceViolationPolicy violation_policy = 2; // The action to take when the quota is violated +} + +// The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota). +message SpaceLimitRequest { + optional SpaceQuota quota = 1; +} -- 2.10.2 From de09c62bde2730b1560a0712949c2865c4d25ebf Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Thu, 3 Nov 2016 14:04:01 -0400 Subject: [PATCH 2/3] HBASE-16996 Basic crud operations around the new quota type in the quota table --- .../apache/hadoop/hbase/quotas/QuotaTableUtil.java | 12 ++- .../hadoop/hbase/quotas/MasterQuotaManager.java | 29 +++++ .../apache/hadoop/hbase/quotas/TestQuotaAdmin.java | 119 ++++++++++++++++++++- 3 files changed, 157 insertions(+), 3 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 116dd0c..511aab4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -52,7 +52,9 @@ import org.apache.hadoop.hbase.util.Strings; *
  *     ROW-KEY      FAM/QUAL        DATA
  *   n.<namespace> q:s         <global-quotas>
+ *   n.<namespace> u:du        <size in bytes>
  *   t.<table>     q:s         <global-quotas>
+ *   t.<table>     u:du        <size in bytes>
  *   u.<user>      q:s         <global-quotas>
  *   u.<user>      q:s.<table> <table-quotas>
  *   u.<user>      q:s.<ns>:   <namespace-quotas>
@@ -71,6 +73,7 @@ public class QuotaTableUtil {
   protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
   protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
   protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
+  protected static final byte[] QUOTA_QUALIFIER_DISKUSAGE = Bytes.toBytes("du");
   protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
   protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
   protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
@@ -298,11 +301,15 @@ public class QuotaTableUtil {
    *  Quotas protobuf helpers
    */
   protected static Quotas quotasFromData(final byte[] data) throws IOException {
+    return quotasFromData(data, 0, data.length);
+  }
+
+  protected static Quotas quotasFromData(final byte[] data, int offset, int length) throws IOException {
     int magicLen = ProtobufMagic.lengthOfPBMagic();
-    if (!ProtobufMagic.isPBMagicPrefix(data, 0, magicLen)) {
+    if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) {
       throw new IOException("Missing pb magic prefix");
     }
-    return Quotas.parseFrom(new ByteArrayInputStream(data, magicLen, data.length - magicLen));
+    return Quotas.parseFrom(new ByteArrayInputStream(data, offset + magicLen, length - magicLen));
   }
 
   protected static byte[] quotasToData(final Quotas data) throws IOException {
@@ -316,6 +323,7 @@ public class QuotaTableUtil {
     boolean hasSettings = false;
     hasSettings |= quotas.hasThrottle();
     hasSettings |= quotas.hasBypassGlobals();
+    hasSettings |= quotas.hasSpace();
     return !hasSettings;
   }
 
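For illustration, a minimal sketch of reading a space quota back out of a raw quota-table cell with the new offset-aware quotasFromData() overload, the same call pattern the tests later in this series use. The class name is hypothetical, and it sits in the org.apache.hadoop.hbase.quotas package only because quotasFromData() is protected:

package org.apache.hadoop.hbase.quotas;

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;

final class SpaceQuotaReader {
  static SpaceQuota readSpaceQuota(Cell cell) throws IOException {
    // Parse straight out of the cell's backing array; the offset/length
    // overload avoids copying the value into a fresh byte[] first.
    Quotas quotas = QuotaTableUtil.quotasFromData(
        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    return quotas.hasSpace() ? quotas.getSpace() : null;
  }
}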
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 647a770..9cceb5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;
@@ -293,9 +295,11 @@ public class MasterQuotaManager implements RegionStateListener {
     Quotas quotas = quotaOps.fetch();
     quotaOps.preApply(quotas);
 
+    // Copy the user request into the Quotas object
     Quotas.Builder builder = (quotas != null) ? quotas.toBuilder() : Quotas.newBuilder();
     if (req.hasThrottle()) applyThrottle(builder, req.getThrottle());
     if (req.hasBypassGlobals()) applyBypassGlobals(builder, req.getBypassGlobals());
+    if (req.hasSpaceLimit()) applySpaceLimit(builder, req.getSpaceLimit());
 
     // Submit new changes
     quotas = builder.build();
@@ -437,6 +441,31 @@ public class MasterQuotaManager implements RegionStateListener {
     }
   }
 
+  /**
+   * Adds the information from the provided {@link SpaceLimitRequest} to the {@link Quotas} builder.
+   *
+   * @param quotas The builder to update.
+   * @param req The request to extract space quota information from.
+   */
+  void applySpaceLimit(final Quotas.Builder quotas, final SpaceLimitRequest req) {
+    if (req.hasQuota()) {
+      applySpaceQuota(quotas, req.getQuota());
+    }
+  }
+
+  /**
+   * Merges the provided {@link SpaceQuota} into the given {@link Quotas} builder.
+   *
+   * @param quotas The Quotas builder instance to update.
+   * @param quota The SpaceQuota instance to update from.
+   */
+  void applySpaceQuota(final Quotas.Builder quotas, final SpaceQuota quota) {
+    // Create a builder for the SpaceQuota, seeded from any existing space quota
+    SpaceQuota.Builder builder = quotas.hasSpace() ? quotas.getSpace().toBuilder() : SpaceQuota.newBuilder();
+    // Merge the values from the provided quota into the builder and set the result on Quotas
+    quotas.setSpace(builder.mergeFrom(quota).build());
+  }
+
   private void validateTimedQuota(final TimedQuota timedQuota) throws IOException {
     if (timedQuota.getSoftLimit() < 1) {
       throw new DoNotRetryIOException(new UnsupportedOperationException(
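For illustration, the merge semantics applySpaceQuota() relies on, as a small self-contained sketch: protobuf's mergeFrom() only overwrites the fields that are set on the incoming message, so a partial update preserves the rest of an existing quota. The class name is hypothetical:

import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;

final class SpaceQuotaMergeExample {
  public static void main(String[] args) {
    SpaceQuota existing = SpaceQuota.newBuilder()
        .setSoftLimit(1024L * 1024L * 1024L) // 1GB limit
        .setViolationPolicy(SpaceViolationPolicy.NO_INSERTS)
        .build();
    // An update that sets only the policy...
    SpaceQuota update = SpaceQuota.newBuilder()
        .setViolationPolicy(SpaceViolationPolicy.NO_WRITES)
        .build();
    // ...keeps the previously configured soft limit after the merge.
    SpaceQuota merged = existing.toBuilder().mergeFrom(update).build();
    System.out.println(merged.getSoftLimit());       // still 1073741824
    System.out.println(merged.getViolationPolicy()); // now NO_WRITES
  }
}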
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index d6e8952..1550df7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -22,20 +22,32 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Iterables;
+
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
@@ -233,10 +245,115 @@ public class TestQuotaAdmin {
     assertNumResults(0, null);
   }
 
+  @Test
+  public void testSetAndGetSpaceQuota() throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    final TableName tn = TableName.valueOf("table1");
+    final long sizeLimit = 1024L * 1024L * 1024L * 1024L * 5L; // 5TB
+    final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_WRITES;
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, violationPolicy);
+    admin.setQuota(settings);
+
+    // Verify the Quotas in the table
+    try (Table quotaTable = TEST_UTIL.getConnection().getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
+      ResultScanner scanner = quotaTable.getScanner(new Scan());
+      try {
+        Result r = Iterables.getOnlyElement(scanner);
+        CellScanner cells = r.cellScanner();
+        assertTrue("Expected to find a cell", cells.advance());
+        assertSpaceQuota(sizeLimit, violationPolicy, cells.current());
+      } finally {
+        scanner.close();
+      }
+    }
+
+    // Verify we can retrieve it via the QuotaRetriever API
+    QuotaRetriever scanner = QuotaRetriever.open(admin.getConfiguration());
+    try {
+      assertSpaceQuota(sizeLimit, violationPolicy, Iterables.getOnlyElement(scanner));
+    } finally {
+      scanner.close();
+    }
+  }
+
+  @Test
+  public void testSetAndModifyQuota() throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    final TableName tn = TableName.valueOf("table1");
+    final long originalSizeLimit = 1024L * 1024L * 1024L * 1024L * 5L; // 5TB
+    final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_WRITES;
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, originalSizeLimit, violationPolicy);
+    admin.setQuota(settings);
+
+    // Verify the Quotas in the table
+    try (Table quotaTable = TEST_UTIL.getConnection().getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
+      ResultScanner scanner = quotaTable.getScanner(new Scan());
+      try {
+        Result r = Iterables.getOnlyElement(scanner);
+        CellScanner cells = r.cellScanner();
+        assertTrue("Expected to find a cell", cells.advance());
+        assertSpaceQuota(originalSizeLimit, violationPolicy, cells.current());
+      } finally {
+        scanner.close();
+      }
+    }
+
+    // Verify we can retrieve it via the QuotaRetriever API
+    QuotaRetriever quotaScanner = QuotaRetriever.open(admin.getConfiguration());
+    try {
+      assertSpaceQuota(originalSizeLimit, violationPolicy, Iterables.getOnlyElement(quotaScanner));
+    } finally {
+      quotaScanner.close();
+    }
+
+    // Setting a new size and policy should be reflected
+    final long newSizeLimit = 1024L * 1024L * 1024L * 1024L; // 1TB
+    final SpaceViolationPolicy newViolationPolicy = SpaceViolationPolicy.NO_WRITES_COMPACTIONS;
+    QuotaSettings newSettings = QuotaSettingsFactory.limitTableSpace(tn, newSizeLimit, newViolationPolicy);
+    admin.setQuota(newSettings);
+
+    // Verify the new Quotas in the table
+    try (Table quotaTable = TEST_UTIL.getConnection().getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
+      ResultScanner scanner = quotaTable.getScanner(new Scan());
+      try {
+        Result r = Iterables.getOnlyElement(scanner);
+        CellScanner cells = r.cellScanner();
+        assertTrue("Expected to find a cell", cells.advance());
+        assertSpaceQuota(newSizeLimit, newViolationPolicy, cells.current());
+      } finally {
+        scanner.close();
+      }
+    }
+
+    // Verify we can retrieve the new quota via the QuotaRetriever API
+    quotaScanner = QuotaRetriever.open(admin.getConfiguration());
+    try {
+      assertSpaceQuota(newSizeLimit, newViolationPolicy, Iterables.getOnlyElement(quotaScanner));
+    } finally {
+      quotaScanner.close();
+    }
+  }
+
   private void assertNumResults(int expected, final QuotaFilter filter) throws Exception {
     assertEquals(expected, countResults(filter));
   }
 
+  private void assertSpaceQuota(long sizeLimit, SpaceViolationPolicy violationPolicy, Cell cell) throws Exception {
+    Quotas q = QuotaTableUtil.quotasFromData(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+    assertTrue("Quota should have space quota defined", q.hasSpace());
+    QuotaProtos.SpaceQuota spaceQuota = q.getSpace();
+    assertEquals(sizeLimit, spaceQuota.getSoftLimit());
+    assertEquals(violationPolicy, ProtobufUtil.toViolationPolicy(spaceQuota.getViolationPolicy()));
+  }
+
+  private void assertSpaceQuota(long sizeLimit, SpaceViolationPolicy violationPolicy, QuotaSettings actualSettings) {
+    assertTrue("The actual QuotaSettings was not an instance of " + SpaceLimitSettings.class + " but of "
+        + actualSettings.getClass(), actualSettings instanceof SpaceLimitSettings);
+    SpaceLimitRequest spaceLimitRequest = ((SpaceLimitSettings) actualSettings).getProto();
+    assertEquals(sizeLimit, spaceLimitRequest.getQuota().getSoftLimit());
+    assertEquals(violationPolicy, ProtobufUtil.toViolationPolicy(spaceLimitRequest.getQuota().getViolationPolicy()));
+  }
+
   private int countResults(final QuotaFilter filter) throws Exception {
     QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration(), filter);
     try {
-- 
2.10.2
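For reference, the client-side flow the tests above exercise, condensed into a sketch (the class and method names are hypothetical): define a table space quota, apply it through the Admin, and read it back with the QuotaRetriever.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

final class SpaceQuotaUsageExample {
  static void applyAndList(Admin admin) throws Exception {
    // Limit "table1" to 5TB; reject writes once the limit is exceeded.
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
        TableName.valueOf("table1"),
        1024L * 1024L * 1024L * 1024L * 5L,
        SpaceViolationPolicy.NO_WRITES);
    admin.setQuota(settings);

    // List all quotas; the space quota surfaces as a SpaceLimitSettings.
    QuotaRetriever retriever = QuotaRetriever.open(admin.getConfiguration());
    try {
      for (QuotaSettings s : retriever) {
        System.out.println(s);
      }
    } finally {
      retriever.close();
    }
  }
}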


From 0b939116fc6f96f6d6d45b12db08de4a70988394 Mon Sep 17 00:00:00 2001
From: Josh Elser 
Date: Mon, 7 Nov 2016 13:46:42 -0500
Subject: [PATCH 3/3] HBASE-17000 Implement computation of online region sizes
 and report to the Master

Includes a trivial implementation of the Master-side collection, only
enough to write a test that verifies the RegionServer-side collection.
---
 .../generated/RegionServerStatusProtos.java        | 2071 +++++++++++++++++++-
 .../src/main/protobuf/RegionServerStatus.proto     |   22 +
 .../hadoop/hbase/master/MasterRpcServices.java     |   19 +
 .../hbase/quotas/FileSystemUtilizationChore.java   |  130 ++
 .../hadoop/hbase/quotas/MasterQuotaManager.java    |   15 +
 .../hadoop/hbase/regionserver/HRegionServer.java   |   72 +
 .../quotas/TestFileSystemUtilizationChore.java     |  214 ++
 .../hadoop/hbase/quotas/TestRegionSizeUse.java     |  193 ++
 .../TestRegionServerRegionSpaceUseReport.java      |   99 +
 9 files changed, 2813 insertions(+), 22 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java

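For illustration, a sketch of how a RegionServer could assemble the report message added below, assuming the standard generated addSpaceUse() accessor for the repeated space_use field (the helper class and method are hypothetical; the patch itself wires this up through HRegionServer and the new FileSystemUtilizationChore):

import java.util.Map;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;

final class RegionSpaceUseReportSketch {
  static RegionSpaceUseReportRequest buildReport(Map<RegionInfo, Long> regionSizes) {
    RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
    for (Map.Entry<RegionInfo, Long> entry : regionSizes.entrySet()) {
      request.addSpaceUse(RegionSpaceUse.newBuilder()
          .setRegion(entry.getKey())    // the region being reported
          .setSize(entry.getValue())    // its filesystem footprint, in bytes
          .build());
    }
    return request.build();
  }
}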
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index 8f368e9..b4c073c 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -10164,6 +10164,1912 @@ public final class RegionServerStatusProtos {
 
   }
 
+  public interface RegionSpaceUseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * 
+     * A region identifier
+     * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + boolean hasRegion(); + /** + *
+     * A region identifier
+     * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion(); + /** + *
+     * A region identifier
+     * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); + + /** + *
+     * The size in bytes of the region
+     * 
+ * + * optional uint64 size = 2; + */ + boolean hasSize(); + /** + *
+     * The size in bytes of the region
+     * 
+ * + * optional uint64 size = 2; + */ + long getSize(); + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUse} + */ + public static final class RegionSpaceUse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUse) + RegionSpaceUseOrBuilder { + // Use RegionSpaceUse.newBuilder() to construct. + private RegionSpaceUse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RegionSpaceUse() { + size_ = 0L; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + size_ = input.readUInt64(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder.class); + } + + private int bitField0_; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_; + /** + *
+     * A region identifier
+     * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+     * A region identifier
+     * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() { + return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_; + } + /** + *
+     * A region identifier
+     * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { + return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_; + } + + public static final int SIZE_FIELD_NUMBER = 2; + private long size_; + /** + *
+     * The size in bytes of the region
+     * 
+ * + * optional uint64 size = 2; + */ + public boolean hasSize() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + *
+     * The size in bytes of the region
+     * 
+ * + * optional uint64 size = 2; + */ + public long getSize() { + return size_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasRegion()) { + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getRegion()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, size_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getRegion()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, size_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && (hasSize() == other.hasSize()); + if (hasSize()) { + result = result && (getSize() + == other.getSize()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + if (hasSize()) { + hash = (37 * hash) + SIZE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getSize()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUse) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = null; + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + size_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } 
+ if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.size_ = size_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + if (other.hasSize()) { + setSize(other.getSize()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (hasRegion()) { + if (!getRegion().isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() { + if (regionBuilder_ == null) { + return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != null && + region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = null; + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + *
+       * A region identifier
+       * 
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_; + } + } + /** + *
+       * <pre>
+       * A region identifier
+       * </pre>
+ * + * optional .hbase.pb.RegionInfo region = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + getRegion(), + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + private long size_ ; + /** + *
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+ * + * optional uint64 size = 2; + */ + public boolean hasSize() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + *
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+ * + * optional uint64 size = 2; + */ + public long getSize() { + return size_; + } + /** + *
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+ * + * optional uint64 size = 2; + */ + public Builder setSize(long value) { + bitField0_ |= 0x00000002; + size_ = value; + onChanged(); + return this; + } + /** + *
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+ * + * optional uint64 size = 2; + */ + public Builder clearSize() { + bitField0_ = (bitField0_ & ~0x00000002); + size_ = 0L; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public RegionSpaceUse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new RegionSpaceUse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface RegionSpaceUseReportRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUseReportRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + java.util.List + getSpaceUseList(); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + int getSpaceUseCount(); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + java.util.List + getSpaceUseOrBuilderList(); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder( + int index); + } + /** + *
+   * <pre>
+   **
+   * Reports filesystem usage for regions.
+   * </pre>
+ * + * Protobuf type {@code hbase.pb.RegionSpaceUseReportRequest} + */ + public static final class RegionSpaceUseReportRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUseReportRequest) + RegionSpaceUseReportRequestOrBuilder { + // Use RegionSpaceUseReportRequest.newBuilder() to construct. + private RegionSpaceUseReportRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RegionSpaceUseReportRequest() { + spaceUse_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionSpaceUseReportRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + spaceUse_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = java.util.Collections.unmodifiableList(spaceUse_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.Builder.class); + } + + public static final int SPACE_USE_FIELD_NUMBER = 1; + private java.util.List spaceUse_; + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List getSpaceUseList() { + return spaceUse_; + } + /** + * repeated .hbase.pb.RegionSpaceUse 
space_use = 1; + */ + public java.util.List + getSpaceUseOrBuilderList() { + return spaceUse_; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public int getSpaceUseCount() { + return spaceUse_.size(); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index) { + return spaceUse_.get(index); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder( + int index) { + return spaceUse_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getSpaceUseCount(); i++) { + if (!getSpaceUse(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < spaceUse_.size(); i++) { + output.writeMessage(1, spaceUse_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < spaceUse_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, spaceUse_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) obj; + + boolean result = true; + result = result && getSpaceUseList() + .equals(other.getSpaceUseList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getSpaceUseCount() > 0) { + hash = (37 * hash) + SPACE_USE_FIELD_NUMBER; + hash = (53 * hash) + getSpaceUseList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     * <pre>
+     **
+     * Reports filesystem usage for regions.
+     * </pre>
+ * + * Protobuf type {@code hbase.pb.RegionSpaceUseReportRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUseReportRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getSpaceUseFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (spaceUseBuilder_ == null) { + spaceUse_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + spaceUseBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest(this); + int from_bitField0_ = bitField0_; + if (spaceUseBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = java.util.Collections.unmodifiableList(spaceUse_); + bitField0_ = (bitField0_ & ~0x00000001); + } + 
result.spaceUse_ = spaceUse_; + } else { + result.spaceUse_ = spaceUseBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance()) return this; + if (spaceUseBuilder_ == null) { + if (!other.spaceUse_.isEmpty()) { + if (spaceUse_.isEmpty()) { + spaceUse_ = other.spaceUse_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSpaceUseIsMutable(); + spaceUse_.addAll(other.spaceUse_); + } + onChanged(); + } + } else { + if (!other.spaceUse_.isEmpty()) { + if (spaceUseBuilder_.isEmpty()) { + spaceUseBuilder_.dispose(); + spaceUseBuilder_ = null; + spaceUse_ = other.spaceUse_; + bitField0_ = (bitField0_ & ~0x00000001); + spaceUseBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getSpaceUseFieldBuilder() : null; + } else { + spaceUseBuilder_.addAllMessages(other.spaceUse_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getSpaceUseCount(); i++) { + if (!getSpaceUse(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List spaceUse_ = + java.util.Collections.emptyList(); + private void ensureSpaceUseIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = new java.util.ArrayList(spaceUse_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> spaceUseBuilder_; + + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List getSpaceUseList() { + if (spaceUseBuilder_ == null) { + return java.util.Collections.unmodifiableList(spaceUse_); + } else { + return spaceUseBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public int getSpaceUseCount() { + if (spaceUseBuilder_ == null) { + return spaceUse_.size(); + } else { + return spaceUseBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index) { + if (spaceUseBuilder_ == null) { + return spaceUse_.get(index); + } else { + return spaceUseBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder setSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) { + if (spaceUseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSpaceUseIsMutable(); + spaceUse_.set(index, value); + onChanged(); + } else { + spaceUseBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder setSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.set(index, builderForValue.build()); + onChanged(); + } else { + spaceUseBuilder_.setMessage(index, builderForValue.build()); + } + 
return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) { + if (spaceUseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSpaceUseIsMutable(); + spaceUse_.add(value); + onChanged(); + } else { + spaceUseBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) { + if (spaceUseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSpaceUseIsMutable(); + spaceUse_.add(index, value); + onChanged(); + } else { + spaceUseBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.add(builderForValue.build()); + onChanged(); + } else { + spaceUseBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.add(index, builderForValue.build()); + onChanged(); + } else { + spaceUseBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addAllSpaceUse( + java.lang.Iterable values) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, spaceUse_); + onChanged(); + } else { + spaceUseBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder clearSpaceUse() { + if (spaceUseBuilder_ == null) { + spaceUse_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + spaceUseBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder removeSpaceUse(int index) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.remove(index); + onChanged(); + } else { + spaceUseBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder getSpaceUseBuilder( + int index) { + return getSpaceUseFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder( + int index) { + if (spaceUseBuilder_ == null) { + return spaceUse_.get(index); } else { + return spaceUseBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List + getSpaceUseOrBuilderList() { + if (spaceUseBuilder_ != null) { + return 
spaceUseBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(spaceUse_); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder addSpaceUseBuilder() { + return getSpaceUseFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder addSpaceUseBuilder( + int index) { + return getSpaceUseFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List + getSpaceUseBuilderList() { + return getSpaceUseFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> + getSpaceUseFieldBuilder() { + if (spaceUseBuilder_ == null) { + spaceUseBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>( + spaceUse_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + spaceUse_ = null; + } + return spaceUseBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUseReportRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUseReportRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public RegionSpaceUseReportRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return 
new RegionSpaceUseReportRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface RegionSpaceUseReportResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUseReportResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUseReportResponse} + */ + public static final class RegionSpaceUseReportResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUseReportResponse) + RegionSpaceUseReportResponseOrBuilder { + // Use RegionSpaceUseReportResponse.newBuilder() to construct. + private RegionSpaceUseReportResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RegionSpaceUseReportResponse() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionSpaceUseReportResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + public final boolean 
isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) obj; + + boolean result = true; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUseReportResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUseReportResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse(this); + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUseReportResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUseReportResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse 
getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public RegionSpaceUseReportResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new RegionSpaceUseReportResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + /** * Protobuf service {@code hbase.pb.RegionServerStatusService} */ @@ -10265,6 +12171,19 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+       * <pre>
+       **
+       * Reports Region filesystem space use
+       * </pre>
+ * + * rpc ReportRegionSpaceUse(.hbase.pb.RegionSpaceUseReportRequest) returns (.hbase.pb.RegionSpaceUseReportResponse); + */ + public abstract void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService( @@ -10326,6 +12245,14 @@ public final class RegionServerStatusProtos { impl.getProcedureResult(controller, request, done); } + @java.lang.Override + public void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.reportRegionSpaceUse(controller, request, done); + } + }; } @@ -10362,6 +12289,8 @@ public final class RegionServerStatusProtos { return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)request); case 6: return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); + case 7: + return impl.reportRegionSpaceUse(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10390,6 +12319,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10418,6 +12349,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10519,6 +12452,19 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+     * <pre>
+     **
+     * Reports Region filesystem space use
+     * </pre>
+ * + * rpc ReportRegionSpaceUse(.hbase.pb.RegionSpaceUseReportRequest) returns (.hbase.pb.RegionSpaceUseReportResponse); + */ + public abstract void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -10576,6 +12522,11 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 7: + this.reportRegionSpaceUse(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -10604,6 +12555,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10632,6 +12585,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10757,6 +12712,21 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance())); } + + public void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -10799,6 +12769,11 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest 
request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -10891,6 +12866,18 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.RegionServerStatusService) @@ -10961,6 +12948,21 @@ public final class RegionServerStatusProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RegionSpaceUse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -11006,28 +13008,35 @@ public final class RegionServerStatusProtos { "est\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" + "onInfo\022\021\n\tsplit_row\030\002 \002(\014\022\026\n\013nonce_group" + "\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"+\n\030SplitTabl" + - "eRegionResponse\022\017\n\007proc_id\030\001 \001(\0042\347\005\n\031Reg" + - "ionServerStatusService\022b\n\023RegionServerSt" + - "artup\022$.hbase.pb.RegionServerStartupRequ" + - "est\032%.hbase.pb.RegionServerStartupRespon", - 
"se\022_\n\022RegionServerReport\022#.hbase.pb.Regi" + - "onServerReportRequest\032$.hbase.pb.RegionS" + - "erverReportResponse\022_\n\022ReportRSFatalErro" + - "r\022#.hbase.pb.ReportRSFatalErrorRequest\032$" + - ".hbase.pb.ReportRSFatalErrorResponse\022q\n\030" + - "GetLastFlushedSequenceId\022).hbase.pb.GetL" + - "astFlushedSequenceIdRequest\032*.hbase.pb.G" + - "etLastFlushedSequenceIdResponse\022z\n\033Repor" + - "tRegionStateTransition\022,.hbase.pb.Report" + - "RegionStateTransitionRequest\032-.hbase.pb.", - "ReportRegionStateTransitionResponse\022T\n\013S" + - "plitRegion\022!.hbase.pb.SplitTableRegionRe" + - "quest\032\".hbase.pb.SplitTableRegionRespons" + - "e\022_\n\022getProcedureResult\022#.hbase.pb.GetPr" + - "ocedureResultRequest\032$.hbase.pb.GetProce" + - "dureResultResponseBU\n1org.apache.hadoop." + - "hbase.shaded.protobuf.generatedB\030RegionS" + - "erverStatusProtosH\001\210\001\001\240\001\001" + "eRegionResponse\022\017\n\007proc_id\030\001 \001(\004\"D\n\016Regi" + + "onSpaceUse\022$\n\006region\030\001 \001(\0132\024.hbase.pb.Re" + + "gionInfo\022\014\n\004size\030\002 \001(\004\"J\n\033RegionSpaceUse" + + "ReportRequest\022+\n\tspace_use\030\001 \003(\0132\030.hbase", + ".pb.RegionSpaceUse\"\036\n\034RegionSpaceUseRepo" + + "rtResponse2\316\006\n\031RegionServerStatusService" + + "\022b\n\023RegionServerStartup\022$.hbase.pb.Regio" + + "nServerStartupRequest\032%.hbase.pb.RegionS" + + "erverStartupResponse\022_\n\022RegionServerRepo" + + "rt\022#.hbase.pb.RegionServerReportRequest\032" + + "$.hbase.pb.RegionServerReportResponse\022_\n" + + "\022ReportRSFatalError\022#.hbase.pb.ReportRSF" + + "atalErrorRequest\032$.hbase.pb.ReportRSFata" + + "lErrorResponse\022q\n\030GetLastFlushedSequence", + "Id\022).hbase.pb.GetLastFlushedSequenceIdRe" + + "quest\032*.hbase.pb.GetLastFlushedSequenceI" + + "dResponse\022z\n\033ReportRegionStateTransition" + + "\022,.hbase.pb.ReportRegionStateTransitionR" + + "equest\032-.hbase.pb.ReportRegionStateTrans" + + "itionResponse\022T\n\013SplitRegion\022!.hbase.pb." + + "SplitTableRegionRequest\032\".hbase.pb.Split" + + "TableRegionResponse\022_\n\022getProcedureResul" + + "t\022#.hbase.pb.GetProcedureResultRequest\032$" + + ".hbase.pb.GetProcedureResultResponse\022e\n\024", + "ReportRegionSpaceUse\022%.hbase.pb.RegionSp" + + "aceUseReportRequest\032&.hbase.pb.RegionSpa" + + "ceUseReportResponseBU\n1org.apache.hadoop" + + ".hbase.shaded.protobuf.generatedB\030Region" + + "ServerStatusProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -11122,6 +13131,24 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SplitTableRegionResponse_descriptor, new java.lang.String[] { "ProcId", }); + internal_static_hbase_pb_RegionSpaceUse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_RegionSpaceUse_descriptor, + new java.lang.String[] { "Region", "Size", }); + internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor, + new java.lang.String[] { "SpaceUse", }); + internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor, + new java.lang.String[] { }); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(); diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto index 1c373ee..23ddd43 100644 --- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto @@ -141,6 +141,22 @@ message SplitTableRegionResponse { optional uint64 proc_id = 1; } +message RegionSpaceUse { + optional RegionInfo region = 1; // A region identifier + optional uint64 size = 2; // The size in bytes of the region +} + +/** + * Reports filesystem usage for regions. + */ +message RegionSpaceUseReportRequest { + repeated RegionSpaceUse space_use = 1; +} + +message RegionSpaceUseReportResponse { + +} + service RegionServerStatusService { /** Called when a region server first starts. 
 */
  rpc RegionServerStartup(RegionServerStartupRequest)
@@ -182,4 +198,10 @@ service RegionServerStatusService {
   */
  rpc getProcedureResult(GetProcedureResultRequest)
    returns(GetProcedureResultResponse);
+
+  /**
+   * Reports Region filesystem space use
+   */
+  rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest)
+    returns(RegionSpaceUseReportResponse);
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 97eb209..b3ce537 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
@@ -79,6 +80,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
@@ -1606,4 +1610,19 @@ public class MasterRpcServices extends RSRpcServices
     }
     return null;
   }
+
+  @Override
+  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
+      RegionSpaceUseReportRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
+      for (RegionSpaceUse report : request.getSpaceUseList()) {
+        quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), report.getSize());
+      }
+      return RegionSpaceUseReportResponse.newBuilder().build();
+    } catch (Exception e) {
+      throw new ServiceException(e);
+    }
+  }
 }
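For orientation, a minimal sketch (not part of the patch) of how the new messages round-trip between a RegionServer and the Master, using only the builder and converter APIs shown above. The class name, table name, and size are hypothetical example values:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;

    public class RegionSpaceUseRoundTrip {
      public static void main(String[] args) {
        HRegionInfo hri = new HRegionInfo(TableName.valueOf("example_table"));
        // RegionServer side: wrap each region and its size into the report request.
        RegionSpaceUseReportRequest request = RegionSpaceUseReportRequest.newBuilder()
            .addSpaceUse(RegionSpaceUse.newBuilder()
                .setRegion(HRegionInfo.convert(hri)) // HRegionInfo -> protobuf RegionInfo
                .setSize(1024L * 1024L))
            .build();
        // Master side: unpack each entry, as reportRegionSpaceUse() above does before
        // handing the (region, size) pair to MasterQuotaManager.addRegionSize().
        for (RegionSpaceUse use : request.getSpaceUseList()) {
          System.out.println(HRegionInfo.convert(use.getRegion()) + " => " + use.getSize() + " bytes");
        }
      }
    }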
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
new file mode 100644
index 0000000..fa4ae1b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Store;
+
+/**
+ * A chore which computes the size of each {@link HRegion} on the FileSystem hosted by the given {@link HRegionServer}.
+ */
+public class FileSystemUtilizationChore extends ScheduledChore {
+  private static final Log LOG = LogFactory.getLog(FileSystemUtilizationChore.class);
+  static final String FS_UTILIZATION_CHORE_PERIOD_KEY = "hbase.regionserver.quotas.fs.utilization.chore.period";
+  static final int FS_UTILIZATION_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 minutes in millis
+
+  static final String FS_UTILIZATION_CHORE_DELAY_KEY = "hbase.regionserver.quotas.fs.utilization.chore.delay";
+  static final long FS_UTILIZATION_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 minute
+
+  static final String FS_UTILIZATION_CHORE_TIMEUNIT_KEY = "hbase.regionserver.quotas.fs.utilization.chore.timeunit";
+  static final String FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT = TimeUnit.MILLISECONDS.name();
+
+  private final HRegionServer rs;
+
+  public FileSystemUtilizationChore(HRegionServer rs) {
+    super(FileSystemUtilizationChore.class.getSimpleName(), rs, getPeriod(rs.getConfiguration()),
+        getInitialDelay(rs.getConfiguration()), getTimeUnit(rs.getConfiguration()));
+    this.rs = rs;
+  }
+
+  @Override
+  protected void chore() {
+    // TODO Chore threads are shared -- cannot "hog" the chore thread.
+    // * Should we impose an upper bound on regions having size calculated? Maximum time spent?
+    // * How do we track "leftovers" if we bail out early? A: Keep state of onlineRegions on the Chore since it's a singleton?
+    final Map<HRegionInfo, Long> onlineRegionSizes = new HashMap<>();
+    final List<Region> onlineRegions = rs.getOnlineRegions();
+    long regionSizesCalculated = 0L;
+    for (Region onlineRegion : onlineRegions) {
+      final long sizeInBytes = computeSize(onlineRegion);
+      onlineRegionSizes.put(onlineRegion.getRegionInfo(), sizeInBytes);
+      regionSizesCalculated++;
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Computed the size of " + regionSizesCalculated + " Regions.");
+    }
+    reportRegionSizesToMaster(onlineRegionSizes);
+  }
+
+  /**
+   * Computes total FileSystem size for the given {@link Region}.
+   *
+   * @param r The region
+   * @return The size, in bytes, of the Region.
+   */
+  long computeSize(Region r) {
+    long regionSize = 0L;
+    for (Store store : r.getStores()) {
+      // StoreFile/StoreFileReaders are already instantiated with the file length cached. Can avoid extra NN ops.
+      regionSize += store.getStorefilesSize();
+    }
+    return regionSize;
+  }
+
+  /**
+   * Reports the computed region sizes to the currently active Master.
+   *
+   * @param onlineRegionSizes The computed region sizes to report.
+   */
+  void reportRegionSizesToMaster(Map<HRegionInfo, Long> onlineRegionSizes) {
+    this.rs.reportRegionSizesForQuotas(onlineRegionSizes);
+  }
+
+  /**
+   * Extracts the period for the chore from the configuration.
+   *
+   * @param conf The configuration object.
+   * @return The configured chore period or the default value.
+   */
+  static int getPeriod(Configuration conf) {
+    return conf.getInt(FS_UTILIZATION_CHORE_PERIOD_KEY, FS_UTILIZATION_CHORE_PERIOD_DEFAULT);
+  }
+
+  /**
+   * Extracts the initial delay for the chore from the configuration.
+   *
+   * @param conf The configuration object.
+   * @return The configured chore initial delay or the default value.
+   */
+  static long getInitialDelay(Configuration conf) {
+    return conf.getLong(FS_UTILIZATION_CHORE_DELAY_KEY, FS_UTILIZATION_CHORE_DELAY_DEFAULT);
+  }
+
+  /**
+   * Extracts the time unit for the chore period and initial delay from the configuration. The configuration
+   * value for {@link #FS_UTILIZATION_CHORE_TIMEUNIT_KEY} must correspond to a {@link TimeUnit} value.
+   *
+   * @param conf The configuration object.
+   * @return The configured time unit for the chore period and initial delay or the default value.
+   */
+  static TimeUnit getTimeUnit(Configuration conf) {
+    return TimeUnit.valueOf(conf.get(FS_UTILIZATION_CHORE_TIMEUNIT_KEY,
+        FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT));
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 9cceb5e..1429b62 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -19,7 +19,10 @@
 package org.apache.hadoop.hbase.quotas;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -62,6 +65,7 @@ public class MasterQuotaManager implements RegionStateListener {
   private NamedLock userLocks;
   private boolean enabled = false;
   private NamespaceAuditor namespaceQuotaManager;
+  private ConcurrentHashMap<HRegionInfo, Long> regionSizes;
 
   public MasterQuotaManager(final MasterServices masterServices) {
     this.masterServices = masterServices;
@@ -85,6 +89,7 @@ public class MasterQuotaManager implements RegionStateListener {
     namespaceLocks = new NamedLock();
     tableLocks = new NamedLock();
     userLocks = new NamedLock();
+    regionSizes = new ConcurrentHashMap<>();
 
     namespaceQuotaManager = new NamespaceAuditor(masterServices);
     namespaceQuotaManager.start();
@@ -514,5 +519,15 @@ public class MasterQuotaManager implements RegionStateListener {
       this.namespaceQuotaManager.removeRegionFromNamespaceUsage(hri);
     }
   }
+
+  public void addRegionSize(HRegionInfo hri, long size) {
+    // TODO Make proper API
+    regionSizes.put(hri, size);
+  }
+
+  public Map<HRegionInfo, Long> snapshotRegionSizes() {
+    // TODO Make proper API
+    return new HashMap<>(regionSizes);
+  }
 }
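snapshotRegionSizes() hands back a copy of the concurrent map, so callers can iterate it without racing concurrent addRegionSize() calls. As a hedged illustration of why HRegionInfo is the key, here is a hypothetical consumer (not in this patch) that rolls the per-region snapshot up to the per-table totals that space quotas ultimately care about:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;

    public class TableSpaceRollup {
      /** Sums a per-region size snapshot into per-table totals. */
      public static Map<TableName, Long> rollup(Map<HRegionInfo, Long> regionSizes) {
        Map<TableName, Long> tableSizes = new HashMap<>();
        for (Map.Entry<HRegionInfo, Long> entry : regionSizes.entrySet()) {
          TableName tn = entry.getKey().getTable();
          Long current = tableSizes.get(tn);
          tableSizes.put(tn, (current == null ? 0L : current) + entry.getValue());
        }
        return tableSizes;
      }
    }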
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 89f7a05..846c41c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -36,6 +36,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedMap;
@@ -72,6 +73,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
@@ -113,6 +115,7 @@ import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.mob.MobCacheConfig;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
+import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
@@ -146,12 +149,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpeci
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
@@ -503,6 +509,8 @@ public class HRegionServer extends HasThread implements
 
   protected final SecureBulkLoadManager secureBulkLoadManager;
 
+  protected FileSystemUtilizationChore fsUtilizationChore;
+
   /**
    * Starts a HRegionServer at the default location.
   */
@@ -907,6 +915,8 @@ public class HRegionServer extends HasThread implements
     // Setup the Quota Manager
     rsQuotaManager = new RegionServerQuotaManager(this);
 
+    this.fsUtilizationChore = new FileSystemUtilizationChore(this);
+
     // Setup RPC client for master communication
     rpcClient = RpcClientFactory.createClient(conf, clusterId, new InetSocketAddress(
         rpcServices.isa.getAddress(), 0), clusterConnection.getConnectionMetrics());
@@ -1219,6 +1229,66 @@ public class HRegionServer extends HasThread implements
     }
   }
 
+  /**
+   * Reports the given map of Regions and their size on the filesystem to the active Master.
+   *
+   * @param onlineRegionSizes A map of region info to size in bytes
+   */
+  public void reportRegionSizesForQuotas(final Map<HRegionInfo, Long> onlineRegionSizes) {
+    RegionServerStatusService.BlockingInterface rss = rssStub;
+    if (rss == null) {
+      // the current server could be stopping.
+      LOG.trace("Skipping Region size report to HMaster as stub is null");
+      return;
+    }
+    try {
+      RegionSpaceUseReportRequest request = buildRegionSpaceUseReportRequest(
+          Objects.requireNonNull(onlineRegionSizes));
+      rss.reportRegionSpaceUse(null, request);
+    } catch (ServiceException se) {
+      IOException ioe = ProtobufUtil.getRemoteException(se);
+      if (ioe instanceof PleaseHoldException) {
+        LOG.trace("Failed to report region sizes to Master because it is initializing. This will be retried.", ioe);
+        // The Master is coming up. Will retry the report later. Avoid re-creating the stub.
+        return;
+      }
+      LOG.debug("Failed to report region sizes to Master. This will be retried.", ioe);
+      if (rssStub == rss) {
+        rssStub = null;
+      }
+      createRegionServerStatusStub(true);
+    }
+  }
+
+  /**
+   * Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
+   *
+   * @param regionSizes Map of region info to size in bytes.
+   * @return The corresponding protocol buffer message.
+   */
+  RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(Map<HRegionInfo, Long> regionSizes) {
+    RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
+    for (Entry<HRegionInfo, Long> entry : Objects.requireNonNull(regionSizes).entrySet()) {
+      request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue()));
+    }
+    return request.build();
+  }
+
+  /**
+   * Converts a pair of {@link HRegionInfo} and {@code long} into a {@link RegionSpaceUse}
+   * protobuf message.
+   *
+   * @param regionInfo The HRegionInfo
+   * @param sizeInBytes The size in bytes of the Region
+   * @return The protocol buffer
+   */
+  RegionSpaceUse convertRegionSize(HRegionInfo regionInfo, Long sizeInBytes) {
+    return RegionSpaceUse.newBuilder()
+        .setRegion(HRegionInfo.convert(Objects.requireNonNull(regionInfo)))
+        .setSize(Objects.requireNonNull(sizeInBytes))
+        .build();
+  }
+
   ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime)
       throws IOException {
     // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
@@ -1793,6 +1863,7 @@ public class HRegionServer extends HasThread implements
     if (this.nonceManagerChore != null) choreService.scheduleChore(nonceManagerChore);
     if (this.storefileRefresher != null) choreService.scheduleChore(storefileRefresher);
     if (this.movedRegionsCleaner != null) choreService.scheduleChore(movedRegionsCleaner);
+    if (this.fsUtilizationChore != null) choreService.scheduleChore(fsUtilizationChore);
 
     // Leases is not a Thread. Internally it runs a daemon thread. If it gets
     // an unhandled exception, it will just exit.
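The chore's period and initial delay share a single time unit, and the configured unit string must be a valid TimeUnit constant or getTimeUnit() will throw from TimeUnit.valueOf(). A small sketch of overriding the defaults defined in FileSystemUtilizationChore (the values here are arbitrary examples, not recommendations):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ChoreConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Compute region sizes every 10 minutes, starting 30 seconds after startup.
        conf.setInt("hbase.regionserver.quotas.fs.utilization.chore.period", 10 * 60);
        conf.setLong("hbase.regionserver.quotas.fs.utilization.chore.delay", 30);
        // Both values above are interpreted in this unit; it must parse via TimeUnit.valueOf().
        conf.set("hbase.regionserver.quotas.fs.utilization.chore.timeunit", TimeUnit.SECONDS.name());
      }
    }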
@@ -2304,6 +2375,7 @@ public class HRegionServer extends HasThread implements
     if (this.healthCheckChore != null) healthCheckChore.cancel(true);
     if (this.storefileRefresher != null) storefileRefresher.cancel(true);
     if (this.movedRegionsCleaner != null) movedRegionsCleaner.cancel(true);
+    if (this.fsUtilizationChore != null) fsUtilizationChore.cancel(true);
 
     if (this.cacheFlusher != null) {
       this.cacheFlusher.join();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
new file mode 100644
index 0000000..18f8532
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Test class for {@link FileSystemUtilizationChore}.
+ */
+@Category(SmallTests.class)
+public class TestFileSystemUtilizationChore {
+
+  @Test
+  public void testNoOnlineRegions() {
+    // A single region with no stores; the reported total size should be zero.
+    final List<Long> regionSizes = Collections.emptyList();
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes)))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo, Long>) any(Map.class));
+
+    final Region region = mockRegionWithSize(regionSizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region));
+    chore.chore();
+  }
+
+  @Test
+  public void testRegionSizes() {
+    // One region with a single store of 1024 bytes.
+    final List<Long> regionSizes = Arrays.asList(1024L);
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes)))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo, Long>) any(Map.class));
+
+    final Region region = mockRegionWithSize(regionSizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region));
+    chore.chore();
+  }
+
+  @Test
+  public void testMultipleRegionSizes() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+
+    // Three regions with multiple store sizes
+    final List<Long> r1Sizes = Arrays.asList(1024L, 2048L);
+    final long r1Sum = sum(r1Sizes);
+    final List<Long> r2Sizes = Arrays.asList(1024L * 1024L);
+    final long r2Sum = sum(r2Sizes);
+    final List<Long> r3Sizes = Arrays.asList(10L * 1024L * 1024L);
+    final long r3Sum = sum(r3Sizes);
+
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum, r2Sum, r3Sum))))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo, Long>) any(Map.class));
+
+    final Region r1 = mockRegionWithSize(r1Sizes);
+    final Region r2 = mockRegionWithSize(r2Sizes);
+    final Region r3 = mockRegionWithSize(r3Sizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3));
+    chore.chore();
+  }
+
+  @Test
+  public void testDefaultConfigurationProperties() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    // Verify that the expected default values are actually represented.
+    assertEquals(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_DEFAULT, chore.getPeriod());
+    assertEquals(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_DEFAULT, chore.getInitialDelay());
+    assertEquals(TimeUnit.valueOf(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT), chore.getTimeUnit());
+  }
+
+  @Test
+  public void testNonDefaultConfigurationProperties() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    // Override the default values
+    final int period = 60 * 10;
+    final long delay = 30L;
+    final TimeUnit timeUnit = TimeUnit.SECONDS;
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, period);
+    conf.setLong(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, delay);
+    conf.set(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_KEY, timeUnit.name());
+
+    // Verify that the chore reports these non-default values
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    assertEquals(period, chore.getPeriod());
+    assertEquals(delay, chore.getInitialDelay());
+    assertEquals(timeUnit, chore.getTimeUnit());
+  }
+
+  /**
+   * Creates an HBase Configuration object for the default values.
+   */
+  private Configuration getDefaultHBaseConfiguration() {
+    final Configuration conf = HBaseConfiguration.create();
+    conf.addResource("hbase-default.xml");
+    return conf;
+  }
+
+  /**
+   * Creates a mock HRegionServer which returns the given Configuration.
+   */
+  private HRegionServer mockRegionServer(Configuration conf) {
+    final HRegionServer rs = mock(HRegionServer.class);
+    when(rs.getConfiguration()).thenReturn(conf);
+    return rs;
+  }
+
+  /**
+   * Sums the collection of non-null numbers.
+   */
+  private long sum(Collection<Long> values) {
+    long sum = 0L;
+    for (Long value : values) {
+      assertNotNull(value);
+      sum += value;
+    }
+    return sum;
+  }
+
+  /**
+   * Creates a region with a number of Stores equal to the length of {@code storeSizes}. Each
+   * {@link Store} will have a reported size corresponding to the element in {@code storeSizes}.
+   *
+   * @param storeSizes A list of sizes for each Store.
+   * @return A mocked Region whose Stores report the given sizes.
+   */
+  private Region mockRegionWithSize(Collection<Long> storeSizes) {
+    final Region r = mock(Region.class);
+    final HRegionInfo info = mock(HRegionInfo.class);
+    when(r.getRegionInfo()).thenReturn(info);
+    List<Store> stores = new ArrayList<>();
+    when(r.getStores()).thenReturn(stores);
+    for (Long storeSize : storeSizes) {
+      final Store s = mock(Store.class);
+      stores.add(s);
+      when(s.getStorefilesSize()).thenReturn(storeSize);
+    }
+    return r;
+  }
+
+  /**
+   * An Answer implementation which verifies the sum of the Region sizes to report is as expected.
+   */
+  private static class ExpectedRegionSizeSummationAnswer implements Answer<Void> {
+    private final long expectedSize;
+
+    public ExpectedRegionSizeSummationAnswer(long expectedSize) {
+      this.expectedSize = expectedSize;
+    }
+
+    @Override
+    public Void answer(InvocationOnMock invocation) throws Throwable {
+      Object[] args = invocation.getArguments();
+      assertEquals(1, args.length);
+      @SuppressWarnings("unchecked")
+      Map<HRegionInfo, Long> regionSizes = (Map<HRegionInfo, Long>) args[0];
+      long sum = 0L;
+      for (Long regionSize : regionSizes.values()) {
+        sum += regionSize;
+      }
+      assertEquals(expectedSize, sum);
+      return null;
+    }
+  }
+}
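Distilled from the helpers above, a standalone sketch (class name hypothetical) of the property these tests exercise: a region's reported size is exactly the sum of its stores' file sizes, nothing more:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.Store;

    public class ComputeSizeSketch {
      public static void main(String[] args) {
        // Two stores whose cached store-file sizes are 1024 and 2048 bytes.
        Store s1 = mock(Store.class);
        Store s2 = mock(Store.class);
        when(s1.getStorefilesSize()).thenReturn(1024L);
        when(s2.getStorefilesSize()).thenReturn(2048L);
        Region region = mock(Region.class);
        when(region.getStores()).thenReturn(Arrays.asList(s1, s2));
        // This mirrors FileSystemUtilizationChore.computeSize(region): 1024 + 2048 = 3072.
        long size = 0L;
        for (Store store : region.getStores()) {
          size += store.getStorefilesSize();
        }
        System.out.println(size); // prints 3072
      }
    }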
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
new file mode 100644
index 0000000..0d92ae8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test class which verifies that region sizes are reported to the master.
+ */
+@Category(MediumTests.class)
+public class TestRegionSizeUse {
+  private static final Log LOG = LogFactory.getLog(TestRegionSizeUse.class);
+  private static final int SIZE_PER_VALUE = 256;
+  private static final int NUM_SPLITS = 10;
+  private static final String F1 = "f1";
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private MiniHBaseCluster cluster;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 1000);
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 1000);
+    conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+    cluster = TEST_UTIL.startMiniCluster(2);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testBasicRegionSizeReports() throws Exception {
+    final long bytesWritten = 5L * 1024L * 1024L; // 5MB
+    final TableName tn = writeData(bytesWritten);
+    LOG.debug("Data was written to HBase");
+    final Admin admin = TEST_UTIL.getAdmin();
+    // Push the data to disk.
+    admin.flush(tn);
+    LOG.debug("Data flushed to disk");
+    // Get the final region distribution
+    final List<HRegionInfo> regions = TEST_UTIL.getAdmin().getTableRegions(tn);
+
+    HMaster master = cluster.getMaster();
+    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
+    Map<HRegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
+    // Wait until we get all of the region reports for our table
+    // The table may split, so make sure we have at least as many as expected right after we
+    // finished writing the data.
+    int observedRegions = numRegionsForTable(tn, regionSizes);
+    while (observedRegions < regions.size()) {
+      LOG.debug("Expecting more regions. Saw " + observedRegions
+          + " region sizes reported, expected at least " + regions.size());
Saw " + observedRegions + " region sizes reported, expected at least " + regions.size()); + Thread.sleep(1000); + regionSizes = quotaManager.snapshotRegionSizes(); + observedRegions = numRegionsForTable(tn, regionSizes); + } + + LOG.debug("Observed region sizes by the HMaster: " + regionSizes); + long totalRegionSize = 0L; + for (Long regionSize : regionSizes.values()) { + totalRegionSize += regionSize; + } + assertTrue("Expected region size report to exceed " + bytesWritten + ", but was " + totalRegionSize + + ". RegionSizes=" + regionSizes, bytesWritten < totalRegionSize); + } + + /** + * Writes at least {@code sizeInBytes} bytes of data to HBase and returns the TableName used. + * + * @param sizeInBytes The amount of data to write in bytes. + * @return The table the data was written to + */ + private TableName writeData(long sizeInBytes) throws IOException { + final Connection conn = TEST_UTIL.getConnection(); + final Admin admin = TEST_UTIL.getAdmin(); + final TableName tn = TableName.valueOf(testName.getMethodName()); + + // Delete the old table + if (admin.tableExists(tn)) { + admin.disableTable(tn); + admin.deleteTable(tn); + } + + // Create the table + HTableDescriptor tableDesc = new HTableDescriptor(tn); + tableDesc.addFamily(new HColumnDescriptor(F1)); + admin.createTable(tableDesc, Bytes.toBytes("1"), Bytes.toBytes("9"), NUM_SPLITS); + + final Table table = conn.getTable(tn); + try { + List updates = new ArrayList<>(); + long bytesToWrite = sizeInBytes; + long rowKeyId = 0L; + final StringBuilder sb = new StringBuilder(); + final Random r = new Random(); + while (bytesToWrite > 0L) { + sb.setLength(0); + sb.append(Long.toString(rowKeyId)); + // Use the reverse counter as the rowKey to get even spread across all regions + Put p = new Put(Bytes.toBytes(sb.reverse().toString())); + byte[] value = new byte[SIZE_PER_VALUE]; + r.nextBytes(value); + p.addColumn(Bytes.toBytes(F1), Bytes.toBytes("q1"), value); + updates.add(p); + + // Batch 50K worth of updates + if (updates.size() > 50) { + table.put(updates); + updates.clear(); + } + + // Just count the value size, ignore the size of rowkey + column + bytesToWrite -= SIZE_PER_VALUE; + rowKeyId++; + } + + // Write the final batch + if (!updates.isEmpty()) { + table.put(updates); + } + + return tn; + } finally { + table.close(); + } + } + + /** + * Computes the number of regions for the given table that have a positive size. + * + * @param tn The TableName in question + * @param regions A collection of region sizes + * @return The number of regions for the given table. + */ + private int numRegionsForTable(TableName tn, Map regions) { + int sum = 0; + for (Entry entry : regions.entrySet()) { + if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) { + sum++; + } + } + return sum; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java new file mode 100644 index 0000000..3244681 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
new file mode 100644
index 0000000..3244681
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.mock;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test class for isolated (non-cluster) tests surrounding the report
+ * of Region space use to the Master by RegionServers.
+ */
+@Category(SmallTests.class)
+public class TestRegionServerRegionSpaceUseReport {
+
+  @Test
+  public void testConversion() {
+    TableName tn = TableName.valueOf("table1");
+    HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    HRegionInfo hri2 = new HRegionInfo(tn, Bytes.toBytes("b"), Bytes.toBytes("c"));
+    HRegionInfo hri3 = new HRegionInfo(tn, Bytes.toBytes("c"), Bytes.toBytes("d"));
+    Map<HRegionInfo, Long> sizes = new HashMap<>();
+    sizes.put(hri1, 1024L * 1024L);
+    sizes.put(hri2, 1024L * 1024L * 8L);
+    sizes.put(hri3, 1024L * 1024L * 32L);
+
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    RegionSpaceUseReportRequest requests = rs.buildRegionSpaceUseReportRequest(sizes);
+    assertEquals(sizes.size(), requests.getSpaceUseCount());
+    for (RegionSpaceUse spaceUse : requests.getSpaceUseList()) {
+      RegionInfo ri = spaceUse.getRegion();
+      HRegionInfo hri = HRegionInfo.convert(ri);
+      Long expectedSize = sizes.remove(hri);
+      assertNotNull("Could not find size for HRI: " + hri, expectedSize);
+      assertEquals(expectedSize.longValue(), spaceUse.getSize());
+    }
+    assertTrue("Should not have any space use entries left: " + sizes, sizes.isEmpty());
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testNullMap() {
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    rs.buildRegionSpaceUseReportRequest(null);
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testMalformedMap() {
+    TableName tn = TableName.valueOf("table1");
+    HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    Map<HRegionInfo, Long> sizes = new HashMap<>();
+    sizes.put(hri1, null);
+
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    rs.buildRegionSpaceUseReportRequest(sizes);
+  }
+}
-- 
2.10.2