From 12c52e4a0e96432442ef8770169257ab0b0c177a Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Mon, 7 Nov 2016 13:46:42 -0500 Subject: [PATCH] HBASE-17000 Implement computation of online region sizes and report to the Master Includes a trivial implementation of the Master-side collection to avoid. Only enough to write a test to verify RS collection. --- .../generated/RegionServerStatusProtos.java | 2071 +++++++++++++++++++- .../src/main/protobuf/RegionServerStatus.proto | 22 + .../hadoop/hbase/master/MasterRpcServices.java | 19 + .../hbase/quotas/FileSystemUtilizationChore.java | 157 ++ .../hadoop/hbase/quotas/MasterQuotaManager.java | 15 + .../hadoop/hbase/regionserver/HRegionServer.java | 72 + .../quotas/TestFileSystemUtilizationChore.java | 221 +++ .../hadoop/hbase/quotas/TestRegionSizeUse.java | 194 ++ .../TestRegionServerRegionSpaceUseReport.java | 99 + 9 files changed, 2848 insertions(+), 22 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java index 8f368e9..b4c073c 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java @@ -10164,6 +10164,1912 @@ public final class RegionServerStatusProtos { } + public interface RegionSpaceUseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + *
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    boolean hasRegion();
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion();
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder();
+
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+     *
+     * <code>optional uint64 size = 2;</code>
+     */
+    boolean hasSize();
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+ * + * optional uint64 size = 2; + */ + long getSize(); + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUse} + */ + public static final class RegionSpaceUse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUse) + RegionSpaceUseOrBuilder { + // Use RegionSpaceUse.newBuilder() to construct. + private RegionSpaceUse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RegionSpaceUse() { + size_ = 0L; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + size_ = input.readUInt64(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder.class); + } + + private int bitField0_; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_; + /** + *
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    public boolean hasRegion() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
+      return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+    }
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
+      return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+    }
+
+    public static final int SIZE_FIELD_NUMBER = 2;
+    private long size_;
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+     *
+     * <code>optional uint64 size = 2;</code>
+     */
+    public boolean hasSize() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+ * + * optional uint64 size = 2; + */ + public long getSize() { + return size_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasRegion()) { + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getRegion()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, size_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getRegion()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, size_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && (hasSize() == other.hasSize()); + if (hasSize()) { + result = result && (getSize() + == other.getSize()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + if (hasSize()) { + hash = (37 * hash) + SIZE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getSize()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUse) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = null; + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + size_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } 
+ if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.size_ = size_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + if (other.hasSize()) { + setSize(other.getSize()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (hasRegion()) { + if (!getRegion().isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_;
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public boolean hasRegion() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
+        if (regionBuilder_ == null) {
+          return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+        } else {
+          return regionBuilder_.getMessage();
+        }
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          region_ = value;
+          onChanged();
+        } else {
+          regionBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder setRegion(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+        if (regionBuilder_ == null) {
+          region_ = builderForValue.build();
+          onChanged();
+        } else {
+          regionBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              region_ != null &&
+              region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+            region_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial();
+          } else {
+            region_ = value;
+          }
+          onChanged();
+        } else {
+          regionBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder clearRegion() {
+        if (regionBuilder_ == null) {
+          region_ = null;
+          onChanged();
+        } else {
+          regionBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getRegionFieldBuilder().getBuilder();
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
+        if (regionBuilder_ != null) {
+          return regionBuilder_.getMessageOrBuilder();
+        } else {
+          return region_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+        }
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+          getRegionFieldBuilder() {
+        if (regionBuilder_ == null) {
+          regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+                  getRegion(),
+                  getParentForChildren(),
+                  isClean());
+          region_ = null;
+        }
+        return regionBuilder_;
+      }
+
+      private long size_ ;
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+       *
+       * <code>optional uint64 size = 2;</code>
+       */
+      public boolean hasSize() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+       *
+       * <code>optional uint64 size = 2;</code>
+       */
+      public long getSize() {
+        return size_;
+      }
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+       *
+       * <code>optional uint64 size = 2;</code>
+       */
+      public Builder setSize(long value) {
+        bitField0_ |= 0x00000002;
+        size_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+ * + * optional uint64 size = 2; + */ + public Builder clearSize() { + bitField0_ = (bitField0_ & ~0x00000002); + size_ = 0L; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public RegionSpaceUse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new RegionSpaceUse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface RegionSpaceUseReportRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUseReportRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + java.util.List + getSpaceUseList(); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + int getSpaceUseCount(); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + java.util.List + getSpaceUseOrBuilderList(); + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder( + int index); + } + /** + *
+   **
+   * Reports filesystem usage for regions.
+   * </pre>
+ * + * Protobuf type {@code hbase.pb.RegionSpaceUseReportRequest} + */ + public static final class RegionSpaceUseReportRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUseReportRequest) + RegionSpaceUseReportRequestOrBuilder { + // Use RegionSpaceUseReportRequest.newBuilder() to construct. + private RegionSpaceUseReportRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RegionSpaceUseReportRequest() { + spaceUse_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionSpaceUseReportRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + spaceUse_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = java.util.Collections.unmodifiableList(spaceUse_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.Builder.class); + } + + public static final int SPACE_USE_FIELD_NUMBER = 1; + private java.util.List spaceUse_; + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List getSpaceUseList() { + return spaceUse_; + } + /** + * repeated .hbase.pb.RegionSpaceUse 
space_use = 1; + */ + public java.util.List + getSpaceUseOrBuilderList() { + return spaceUse_; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public int getSpaceUseCount() { + return spaceUse_.size(); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index) { + return spaceUse_.get(index); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder( + int index) { + return spaceUse_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getSpaceUseCount(); i++) { + if (!getSpaceUse(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < spaceUse_.size(); i++) { + output.writeMessage(1, spaceUse_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < spaceUse_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, spaceUse_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) obj; + + boolean result = true; + result = result && getSpaceUseList() + .equals(other.getSpaceUseList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getSpaceUseCount() > 0) { + hash = (37 * hash) + SPACE_USE_FIELD_NUMBER; + hash = (53 * hash) + getSpaceUseList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     **
+     * Reports filesystem usage for regions.
+     * </pre>
+ * + * Protobuf type {@code hbase.pb.RegionSpaceUseReportRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUseReportRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getSpaceUseFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (spaceUseBuilder_ == null) { + spaceUse_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + spaceUseBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest(this); + int from_bitField0_ = bitField0_; + if (spaceUseBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = java.util.Collections.unmodifiableList(spaceUse_); + bitField0_ = (bitField0_ & ~0x00000001); + } + 
result.spaceUse_ = spaceUse_; + } else { + result.spaceUse_ = spaceUseBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance()) return this; + if (spaceUseBuilder_ == null) { + if (!other.spaceUse_.isEmpty()) { + if (spaceUse_.isEmpty()) { + spaceUse_ = other.spaceUse_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSpaceUseIsMutable(); + spaceUse_.addAll(other.spaceUse_); + } + onChanged(); + } + } else { + if (!other.spaceUse_.isEmpty()) { + if (spaceUseBuilder_.isEmpty()) { + spaceUseBuilder_.dispose(); + spaceUseBuilder_ = null; + spaceUse_ = other.spaceUse_; + bitField0_ = (bitField0_ & ~0x00000001); + spaceUseBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getSpaceUseFieldBuilder() : null; + } else { + spaceUseBuilder_.addAllMessages(other.spaceUse_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getSpaceUseCount(); i++) { + if (!getSpaceUse(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List spaceUse_ = + java.util.Collections.emptyList(); + private void ensureSpaceUseIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + spaceUse_ = new java.util.ArrayList(spaceUse_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> spaceUseBuilder_; + + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List getSpaceUseList() { + if (spaceUseBuilder_ == null) { + return java.util.Collections.unmodifiableList(spaceUse_); + } else { + return spaceUseBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public int getSpaceUseCount() { + if (spaceUseBuilder_ == null) { + return spaceUse_.size(); + } else { + return spaceUseBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index) { + if (spaceUseBuilder_ == null) { + return spaceUse_.get(index); + } else { + return spaceUseBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder setSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) { + if (spaceUseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSpaceUseIsMutable(); + spaceUse_.set(index, value); + onChanged(); + } else { + spaceUseBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder setSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.set(index, builderForValue.build()); + onChanged(); + } else { + spaceUseBuilder_.setMessage(index, builderForValue.build()); + } + 
return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) { + if (spaceUseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSpaceUseIsMutable(); + spaceUse_.add(value); + onChanged(); + } else { + spaceUseBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) { + if (spaceUseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSpaceUseIsMutable(); + spaceUse_.add(index, value); + onChanged(); + } else { + spaceUseBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.add(builderForValue.build()); + onChanged(); + } else { + spaceUseBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addSpaceUse( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.add(index, builderForValue.build()); + onChanged(); + } else { + spaceUseBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder addAllSpaceUse( + java.lang.Iterable values) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, spaceUse_); + onChanged(); + } else { + spaceUseBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder clearSpaceUse() { + if (spaceUseBuilder_ == null) { + spaceUse_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + spaceUseBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public Builder removeSpaceUse(int index) { + if (spaceUseBuilder_ == null) { + ensureSpaceUseIsMutable(); + spaceUse_.remove(index); + onChanged(); + } else { + spaceUseBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder getSpaceUseBuilder( + int index) { + return getSpaceUseFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder( + int index) { + if (spaceUseBuilder_ == null) { + return spaceUse_.get(index); } else { + return spaceUseBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List + getSpaceUseOrBuilderList() { + if (spaceUseBuilder_ != null) { + return 
spaceUseBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(spaceUse_); + } + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder addSpaceUseBuilder() { + return getSpaceUseFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder addSpaceUseBuilder( + int index) { + return getSpaceUseFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpaceUse space_use = 1; + */ + public java.util.List + getSpaceUseBuilderList() { + return getSpaceUseFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> + getSpaceUseFieldBuilder() { + if (spaceUseBuilder_ == null) { + spaceUseBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>( + spaceUse_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + spaceUse_ = null; + } + return spaceUseBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUseReportRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUseReportRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public RegionSpaceUseReportRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return 
new RegionSpaceUseReportRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface RegionSpaceUseReportResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUseReportResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUseReportResponse} + */ + public static final class RegionSpaceUseReportResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUseReportResponse) + RegionSpaceUseReportResponseOrBuilder { + // Use RegionSpaceUseReportResponse.newBuilder() to construct. + private RegionSpaceUseReportResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RegionSpaceUseReportResponse() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionSpaceUseReportResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + public final boolean 
isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) obj; + + boolean result = true; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RegionSpaceUseReportResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUseReportResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse(this); + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUseReportResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUseReportResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse 
getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public RegionSpaceUseReportResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new RegionSpaceUseReportResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + /** * Protobuf service {@code hbase.pb.RegionServerStatusService} */ @@ -10265,6 +12171,19 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+       **
+       * Reports Region filesystem space use
+       * 
+ * + * rpc ReportRegionSpaceUse(.hbase.pb.RegionSpaceUseReportRequest) returns (.hbase.pb.RegionSpaceUseReportResponse); + */ + public abstract void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService( @@ -10326,6 +12245,14 @@ public final class RegionServerStatusProtos { impl.getProcedureResult(controller, request, done); } + @java.lang.Override + public void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.reportRegionSpaceUse(controller, request, done); + } + }; } @@ -10362,6 +12289,8 @@ public final class RegionServerStatusProtos { return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)request); case 6: return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); + case 7: + return impl.reportRegionSpaceUse(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10390,6 +12319,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10418,6 +12349,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10519,6 +12452,19 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+     **
+     * Reports Region filesystem space use
+     * 
+ * + * rpc ReportRegionSpaceUse(.hbase.pb.RegionSpaceUseReportRequest) returns (.hbase.pb.RegionSpaceUseReportResponse); + */ + public abstract void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -10576,6 +12522,11 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 7: + this.reportRegionSpaceUse(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -10604,6 +12555,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10632,6 +12585,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(); case 6: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10757,6 +12712,21 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance())); } + + public void reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -10799,6 +12769,11 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest 
request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -10891,6 +12866,18 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse reportRegionSpaceUse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.RegionServerStatusService) @@ -10961,6 +12948,21 @@ public final class RegionServerStatusProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RegionSpaceUse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -11006,28 +13008,35 @@ public final class RegionServerStatusProtos { "est\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" + "onInfo\022\021\n\tsplit_row\030\002 \002(\014\022\026\n\013nonce_group" + "\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"+\n\030SplitTabl" + - "eRegionResponse\022\017\n\007proc_id\030\001 \001(\0042\347\005\n\031Reg" + - "ionServerStatusService\022b\n\023RegionServerSt" + - "artup\022$.hbase.pb.RegionServerStartupRequ" + - "est\032%.hbase.pb.RegionServerStartupRespon", - 
"se\022_\n\022RegionServerReport\022#.hbase.pb.Regi" + - "onServerReportRequest\032$.hbase.pb.RegionS" + - "erverReportResponse\022_\n\022ReportRSFatalErro" + - "r\022#.hbase.pb.ReportRSFatalErrorRequest\032$" + - ".hbase.pb.ReportRSFatalErrorResponse\022q\n\030" + - "GetLastFlushedSequenceId\022).hbase.pb.GetL" + - "astFlushedSequenceIdRequest\032*.hbase.pb.G" + - "etLastFlushedSequenceIdResponse\022z\n\033Repor" + - "tRegionStateTransition\022,.hbase.pb.Report" + - "RegionStateTransitionRequest\032-.hbase.pb.", - "ReportRegionStateTransitionResponse\022T\n\013S" + - "plitRegion\022!.hbase.pb.SplitTableRegionRe" + - "quest\032\".hbase.pb.SplitTableRegionRespons" + - "e\022_\n\022getProcedureResult\022#.hbase.pb.GetPr" + - "ocedureResultRequest\032$.hbase.pb.GetProce" + - "dureResultResponseBU\n1org.apache.hadoop." + - "hbase.shaded.protobuf.generatedB\030RegionS" + - "erverStatusProtosH\001\210\001\001\240\001\001" + "eRegionResponse\022\017\n\007proc_id\030\001 \001(\004\"D\n\016Regi" + + "onSpaceUse\022$\n\006region\030\001 \001(\0132\024.hbase.pb.Re" + + "gionInfo\022\014\n\004size\030\002 \001(\004\"J\n\033RegionSpaceUse" + + "ReportRequest\022+\n\tspace_use\030\001 \003(\0132\030.hbase", + ".pb.RegionSpaceUse\"\036\n\034RegionSpaceUseRepo" + + "rtResponse2\316\006\n\031RegionServerStatusService" + + "\022b\n\023RegionServerStartup\022$.hbase.pb.Regio" + + "nServerStartupRequest\032%.hbase.pb.RegionS" + + "erverStartupResponse\022_\n\022RegionServerRepo" + + "rt\022#.hbase.pb.RegionServerReportRequest\032" + + "$.hbase.pb.RegionServerReportResponse\022_\n" + + "\022ReportRSFatalError\022#.hbase.pb.ReportRSF" + + "atalErrorRequest\032$.hbase.pb.ReportRSFata" + + "lErrorResponse\022q\n\030GetLastFlushedSequence", + "Id\022).hbase.pb.GetLastFlushedSequenceIdRe" + + "quest\032*.hbase.pb.GetLastFlushedSequenceI" + + "dResponse\022z\n\033ReportRegionStateTransition" + + "\022,.hbase.pb.ReportRegionStateTransitionR" + + "equest\032-.hbase.pb.ReportRegionStateTrans" + + "itionResponse\022T\n\013SplitRegion\022!.hbase.pb." + + "SplitTableRegionRequest\032\".hbase.pb.Split" + + "TableRegionResponse\022_\n\022getProcedureResul" + + "t\022#.hbase.pb.GetProcedureResultRequest\032$" + + ".hbase.pb.GetProcedureResultResponse\022e\n\024", + "ReportRegionSpaceUse\022%.hbase.pb.RegionSp" + + "aceUseReportRequest\032&.hbase.pb.RegionSpa" + + "ceUseReportResponseBU\n1org.apache.hadoop" + + ".hbase.shaded.protobuf.generatedB\030Region" + + "ServerStatusProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -11122,6 +13131,24 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SplitTableRegionResponse_descriptor, new java.lang.String[] { "ProcId", }); + internal_static_hbase_pb_RegionSpaceUse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_RegionSpaceUse_descriptor, + new java.lang.String[] { "Region", "Size", }); + internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor, + new java.lang.String[] { "SpaceUse", }); + internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor, + new java.lang.String[] { }); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(); diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto index 1c373ee..23ddd43 100644 --- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto @@ -141,6 +141,22 @@ message SplitTableRegionResponse { optional uint64 proc_id = 1; } +message RegionSpaceUse { + optional RegionInfo region = 1; // A region identifier + optional uint64 size = 2; // The size in bytes of the region +} + +/** + * Reports filesystem usage for regions. + */ +message RegionSpaceUseReportRequest { + repeated RegionSpaceUse space_use = 1; +} + +message RegionSpaceUseReportResponse { + +} + service RegionServerStatusService { /** Called when a region server first starts. 
*/ rpc RegionServerStartup(RegionServerStartupRequest) @@ -182,4 +198,10 @@ service RegionServerStatusService { */ rpc getProcedureResult(GetProcedureResultRequest) returns(GetProcedureResultResponse); + + /** + * Reports Region filesystem space use + */ + rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest) + returns(RegionSpaceUseReportResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 97eb209..b3ce537 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure.MasterProcedureManager; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureUtil; +import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; @@ -79,6 +80,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse; @@ -1606,4 +1610,19 @@ public class MasterRpcServices extends RSRpcServices } return null; } + + @Override + public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller, + RegionSpaceUseReportRequest request) throws ServiceException { + try { + master.checkInitialized(); + MasterQuotaManager quotaManager = this.master.getMasterQuotaManager(); + for (RegionSpaceUse report : request.getSpaceUseList()) { + quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), report.getSize()); + } + return RegionSpaceUseReportResponse.newBuilder().build(); + } catch (Exception e) { + throw new ServiceException(e); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java new file mode 100644 index 0000000..a9153d3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Store; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +/** + * A chore which computes the size of each {@link HRegion} on the FileSystem hosted by the given {@link HRegionServer}. + */ +@InterfaceAudience.Private +public class FileSystemUtilizationChore extends ScheduledChore { + private static final Log LOG = LogFactory.getLog(FileSystemUtilizationChore.class); + static final String FS_UTILIZATION_CHORE_PERIOD_KEY = "hbase.regionserver.quotas.fs.utilization.chore.period"; + static final int FS_UTILIZATION_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 minutes in millis + + static final String FS_UTILIZATION_CHORE_DELAY_KEY = "hbase.regionserver.quotas.fs.utilization.chore.delay"; + static final long FS_UTILIZATION_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 minute + + static final String FS_UTILIZATION_CHORE_TIMEUNIT_KEY = "hbase.regionserver.quotas.fs.utilization.chore.timeunit"; + static final String FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT = TimeUnit.MILLISECONDS.name(); + + static final String FS_UTILIZATION_MAX_ITERATION_DURATION_KEY = "hbase.regionserver.quotas.fs.utilization.chore.max.iteration.millis"; + static final long FS_UTILIZATION_MAX_ITERATION_DURATION_DEFAULT = 5000L; + + private final HRegionServer rs; + private final long maxIterationMillis; + private Iterator leftoverRegions; + + public FileSystemUtilizationChore(HRegionServer rs) { + super(FileSystemUtilizationChore.class.getSimpleName(), rs, getPeriod(rs.getConfiguration()), + getInitialDelay(rs.getConfiguration()), getTimeUnit(rs.getConfiguration())); + this.rs = rs; + this.maxIterationMillis = rs.getConfiguration().getLong( + FS_UTILIZATION_MAX_ITERATION_DURATION_KEY, FS_UTILIZATION_MAX_ITERATION_DURATION_DEFAULT); + } + + @Override + protected void chore() { + final Map onlineRegionSizes = new HashMap<>(); + // Process the regions from the last run if we have any. If we are somehow having difficulty + // processing the Regions, we want to avoid creating a backlog in memory of Region objs. + final Iterator iterator = (null == leftoverRegions) + ? rs.getOnlineRegions().iterator() : leftoverRegions; + // Reset the leftoverRegions and let the loop re-assign if necessary. 
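+    // If this pass runs longer than maxIterationMillis, the preemption check in the loop below stashes the remaining iterator here again.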
+    leftoverRegions = null; +    long regionSizesCalculated = 0L; +    final long start = EnvironmentEdgeManager.currentTime(); +    while (iterator.hasNext()) { +      final Region onlineRegion = iterator.next(); +      final long sizeInBytes = computeSize(onlineRegion); +      onlineRegionSizes.put(onlineRegion.getRegionInfo(), sizeInBytes); +      regionSizesCalculated++; +      // Make sure this chore doesn't hog the thread. +      if (iterator.hasNext()) { +        long timeRunning = EnvironmentEdgeManager.currentTime() - start; +        if (timeRunning > maxIterationMillis) { +          LOG.debug("Preempting execution of FileSystemUtilizationChore because it has exceeded the" +              + " configured maximum iteration time. Will process the remaining Regions" +              + " on a subsequent invocation."); +          leftoverRegions = iterator; +          break; +        } +      } +    } +    if (LOG.isTraceEnabled()) { +      LOG.trace("Computed the size of " + regionSizesCalculated + " Regions."); +    } +    reportRegionSizesToMaster(onlineRegionSizes); +  } + +  /** +   * Computes total FileSystem size for the given {@link Region}. +   * +   * @param r The region +   * @return The size, in bytes, of the Region. +   */ +  long computeSize(Region r) { +    long regionSize = 0L; +    for (Store store : r.getStores()) { +      // StoreFile/StoreFileReaders are already instantiated with the file length cached. +      // Can avoid extra NN ops. +      regionSize += store.getStorefilesSize(); +    } +    return regionSize; +  } + +  /** +   * Reports the computed region sizes to the currently active Master. +   * +   * @param onlineRegionSizes The computed region sizes to report. +   */ +  void reportRegionSizesToMaster(Map onlineRegionSizes) { +    this.rs.reportRegionSizesForQuotas(onlineRegionSizes); +  } + +  /** +   * Extracts the period for the chore from the configuration. +   * +   * @param conf The configuration object. +   * @return The configured chore period or the default value. +   */ +  static int getPeriod(Configuration conf) { +    return conf.getInt(FS_UTILIZATION_CHORE_PERIOD_KEY, FS_UTILIZATION_CHORE_PERIOD_DEFAULT); +  } + +  /** +   * Extracts the initial delay for the chore from the configuration. +   * +   * @param conf The configuration object. +   * @return The configured chore initial delay or the default value. +   */ +  static long getInitialDelay(Configuration conf) { +    return conf.getLong(FS_UTILIZATION_CHORE_DELAY_KEY, FS_UTILIZATION_CHORE_DELAY_DEFAULT); +  } + +  /** +   * Extracts the time unit for the chore period and initial delay from the configuration. The +   * configuration value for {@link #FS_UTILIZATION_CHORE_TIMEUNIT_KEY} must correspond to a +   * {@link TimeUnit} value. +   * +   * @param conf The configuration object. +   * @return The configured time unit for the chore period and initial delay or the default value.
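+   * @throws IllegalArgumentException if the configured value does not name a {@link TimeUnit} constant.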
+ */ + static TimeUnit getTimeUnit(Configuration conf) { + return TimeUnit.valueOf(conf.get(FS_UTILIZATION_CHORE_TIMEUNIT_KEY, + FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT)); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index bd9f410..fc24e52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -19,7 +19,10 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -62,6 +65,7 @@ public class MasterQuotaManager implements RegionStateListener { private NamedLock userLocks; private boolean enabled = false; private NamespaceAuditor namespaceQuotaManager; + private ConcurrentHashMap regionSizes; public MasterQuotaManager(final MasterServices masterServices) { this.masterServices = masterServices; @@ -85,6 +89,7 @@ public class MasterQuotaManager implements RegionStateListener { namespaceLocks = new NamedLock(); tableLocks = new NamedLock(); userLocks = new NamedLock(); + regionSizes = new ConcurrentHashMap<>(); namespaceQuotaManager = new NamespaceAuditor(masterServices); namespaceQuotaManager.start(); @@ -515,5 +520,15 @@ public class MasterQuotaManager implements RegionStateListener { this.namespaceQuotaManager.removeRegionFromNamespaceUsage(hri); } } + + public void addRegionSize(HRegionInfo hri, long size) { + // TODO Make proper API + regionSizes.put(hri, size); + } + + public Map snapshotRegionSizes() { + // TODO Make proper API + return new HashMap<>(regionSizes); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8e78422..0fbfd2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -36,6 +36,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Map.Entry; import java.util.Set; import java.util.SortedMap; @@ -72,6 +73,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HealthCheckChore; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; @@ -113,6 +115,7 @@ import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.mob.MobCacheConfig; import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost; +import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore; import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager; import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; @@ -146,12 +149,15 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpeci import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; @@ -501,6 +507,8 @@ public class HRegionServer extends HasThread implements protected final SecureBulkLoadManager secureBulkLoadManager; + protected FileSystemUtilizationChore fsUtilizationChore; + /** * Starts a HRegionServer at the default location. */ @@ -909,6 +917,8 @@ public class HRegionServer extends HasThread implements // Setup the Quota Manager rsQuotaManager = new RegionServerQuotaManager(this); + this.fsUtilizationChore = new FileSystemUtilizationChore(this); + // Setup RPC client for master communication rpcClient = RpcClientFactory.createClient(conf, clusterId, new InetSocketAddress( rpcServices.isa.getAddress(), 0), clusterConnection.getConnectionMetrics()); @@ -1221,6 +1231,66 @@ public class HRegionServer extends HasThread implements } } + /** + * Reports the given map of Regions and their size on the filesystem to the active Master. + * + * @param onlineRegionSizes A map of region info to size in bytes + */ + public void reportRegionSizesForQuotas(final Map onlineRegionSizes) { + RegionServerStatusService.BlockingInterface rss = rssStub; + if (rss == null) { + // the current server could be stopping. + LOG.trace("Skipping Region size report to HMaster as stub is null"); + return; + } + try { + RegionSpaceUseReportRequest request = buildRegionSpaceUseReportRequest( + Objects.requireNonNull(onlineRegionSizes)); + rss.reportRegionSpaceUse(null, request); + } catch (ServiceException se) { + IOException ioe = ProtobufUtil.getRemoteException(se); + if (ioe instanceof PleaseHoldException) { + LOG.trace("Failed to report region sizes to Master because it is initializing. This will be retried.", ioe); + // The Master is coming up. Will retry the report later. Avoid re-creating the stub. + return; + } + LOG.debug("Failed to report region sizes to Master. 
This will be retried.", ioe); + if (rssStub == rss) { + rssStub = null; + } + createRegionServerStatusStub(true); + } + } + + /** + * Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map. + * + * @param regionSizes Map of region info to size in bytes. + * @return The corresponding protocol buffer message. + */ + RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(Map regionSizes) { + RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder(); + for (Entry entry : Objects.requireNonNull(regionSizes).entrySet()) { + request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue())); + } + return request.build(); + } + + /** + * Converts a pair of {@link HRegionInfo} and {@code long} into a {@link RegionSpaceUse} + * protobuf message. + * + * @param regionInfo The HRegionInfo + * @param sizeInBytes The size in bytes of the Region + * @return The protocol buffer + */ + RegionSpaceUse convertRegionSize(HRegionInfo regionInfo, Long sizeInBytes) { + return RegionSpaceUse.newBuilder() + .setRegion(HRegionInfo.convert(Objects.requireNonNull(regionInfo))) + .setSize(Objects.requireNonNull(sizeInBytes)) + .build(); + } + ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) throws IOException { // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests @@ -1780,6 +1850,7 @@ public class HRegionServer extends HasThread implements if (this.nonceManagerChore != null) choreService.scheduleChore(nonceManagerChore); if (this.storefileRefresher != null) choreService.scheduleChore(storefileRefresher); if (this.movedRegionsCleaner != null) choreService.scheduleChore(movedRegionsCleaner); + if (this.fsUtilizationChore != null) choreService.scheduleChore(fsUtilizationChore); // Leases is not a Thread. Internally it runs a daemon thread. If it gets // an unhandled exception, it will just exit. @@ -2284,6 +2355,7 @@ public class HRegionServer extends HasThread implements if (this.healthCheckChore != null) healthCheckChore.cancel(true); if (this.storefileRefresher != null) storefileRefresher.cancel(true); if (this.movedRegionsCleaner != null) movedRegionsCleaner.cancel(true); + if (this.fsUtilizationChore != null) fsUtilizationChore.cancel(true); if (this.cacheFlusher != null) { this.cacheFlusher.join(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java new file mode 100644 index 0000000..72c40c5 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Store; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +/** + * Test class for {@link FileSystemUtilizationChore}. + */ +@Category(SmallTests.class) +public class TestFileSystemUtilizationChore { + +  @SuppressWarnings("unchecked") +  @Test +  public void testNoOnlineRegions() { +    // A region with no stores; the chore should report a total size of zero. +    final List regionSizes = Collections.emptyList(); +    final Configuration conf = getDefaultHBaseConfiguration(); +    final HRegionServer rs = mockRegionServer(conf); +    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); +    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes))) +        .when(rs) +        .reportRegionSizesForQuotas((Map) any(Map.class)); + +    final Region region = mockRegionWithSize(regionSizes); +    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region)); +    chore.chore(); +  } + +  @SuppressWarnings("unchecked") +  @Test +  public void testRegionSizes() { +    // One region with a single store of 1024 bytes.
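+    // The ExpectedRegionSizeSummationAnswer installed below verifies that the chore reports exactly this total.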
+ final List regionSizes = Arrays.asList(1024L); + final Configuration conf = getDefaultHBaseConfiguration(); + final HRegionServer rs = mockRegionServer(conf); + final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); + doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes))) + .when(rs) + .reportRegionSizesForQuotas((Map) any(Map.class)); + + final Region region = mockRegionWithSize(regionSizes); + when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region)); + chore.chore(); + } + + @SuppressWarnings("unchecked") + @Test + public void testMultipleRegionSizes() { + final Configuration conf = getDefaultHBaseConfiguration(); + final HRegionServer rs = mockRegionServer(conf); + + // Three regions with multiple store sizes + final List r1Sizes = Arrays.asList(1024L, 2048L); + final long r1Sum = sum(r1Sizes); + final List r2Sizes = Arrays.asList(1024L * 1024L); + final long r2Sum = sum(r2Sizes); + final List r3Sizes = Arrays.asList(10L * 1024L * 1024L); + final long r3Sum = sum(r3Sizes); + + final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); + doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum, r2Sum, r3Sum)))) + .when(rs) + .reportRegionSizesForQuotas((Map) any(Map.class)); + + final Region r1 = mockRegionWithSize(r1Sizes); + final Region r2 = mockRegionWithSize(r2Sizes); + final Region r3 = mockRegionWithSize(r3Sizes); + when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3)); + chore.chore(); + } + + @Test + public void testDefaultConfigurationProperties() { + final Configuration conf = getDefaultHBaseConfiguration(); + final HRegionServer rs = mockRegionServer(conf); + final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); + // Verify that the expected default values are actually represented. + assertEquals( + FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_DEFAULT, chore.getPeriod()); + assertEquals( + FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_DEFAULT, chore.getInitialDelay()); + assertEquals( + TimeUnit.valueOf(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT), + chore.getTimeUnit()); + } + + @Test + public void testNonDefaultConfigurationProperties() { + final Configuration conf = getDefaultHBaseConfiguration(); + // Override the default values + final int period = 60 * 10; + final long delay = 30L; + final TimeUnit timeUnit = TimeUnit.SECONDS; + conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, period); + conf.setLong(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, delay); + conf.set(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_KEY, timeUnit.name()); + + // Verify that the chore reports these non-default values + final HRegionServer rs = mockRegionServer(conf); + final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); + assertEquals(period, chore.getPeriod()); + assertEquals(delay, chore.getInitialDelay()); + assertEquals(timeUnit, chore.getTimeUnit()); + } + + /** + * Creates an HBase Configuration object for the default values. + */ + private Configuration getDefaultHBaseConfiguration() { + final Configuration conf = HBaseConfiguration.create(); + conf.addResource("hbase-default.xml"); + return conf; + } + + /** + * Creates an HRegionServer using the given Configuration. 
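+   * Only {@code getConfiguration()} is stubbed here; individual tests stub {@code getOnlineRegions()} and the size report callback as needed.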
+   */
+  private HRegionServer mockRegionServer(Configuration conf) {
+    final HRegionServer rs = mock(HRegionServer.class);
+    when(rs.getConfiguration()).thenReturn(conf);
+    return rs;
+  }
+
+  /**
+   * Sums the collection of non-null numbers.
+   */
+  private long sum(Collection<Long> values) {
+    long sum = 0L;
+    for (Long value : values) {
+      assertNotNull(value);
+      sum += value;
+    }
+    return sum;
+  }
+
+  /**
+   * Creates a region with a number of Stores equal to the length of {@code storeSizes}. Each
+   * {@link Store} will have a reported size corresponding to the element in {@code storeSizes}.
+   *
+   * @param storeSizes A list of sizes for each Store.
+   * @return A mock Region whose mock Stores report the given sizes.
+   */
+  private Region mockRegionWithSize(Collection<Long> storeSizes) {
+    final Region r = mock(Region.class);
+    final HRegionInfo info = mock(HRegionInfo.class);
+    when(r.getRegionInfo()).thenReturn(info);
+    List<Store> stores = new ArrayList<>();
+    when(r.getStores()).thenReturn(stores);
+    for (Long storeSize : storeSizes) {
+      final Store s = mock(Store.class);
+      stores.add(s);
+      when(s.getStorefilesSize()).thenReturn(storeSize);
+    }
+    return r;
+  }
+
+  /**
+   * An Answer implementation which verifies the sum of the Region sizes to report is as expected.
+   */
+  private static class ExpectedRegionSizeSummationAnswer implements Answer<Void> {
+    private final long expectedSize;
+
+    public ExpectedRegionSizeSummationAnswer(long expectedSize) {
+      this.expectedSize = expectedSize;
+    }
+
+    @Override
+    public Void answer(InvocationOnMock invocation) throws Throwable {
+      Object[] args = invocation.getArguments();
+      assertEquals(1, args.length);
+      @SuppressWarnings("unchecked")
+      Map<HRegionInfo, Long> regionSizes = (Map<HRegionInfo, Long>) args[0];
+      long sum = 0L;
+      for (Long regionSize : regionSizes.values()) {
+        sum += regionSize;
+      }
+      assertEquals(expectedSize, sum);
+      return null;
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
new file mode 100644
index 0000000..ed8a2f3
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test class which verifies that region sizes are reported to the Master.
+ */
+@Category(MediumTests.class)
+public class TestRegionSizeUse {
+  private static final Log LOG = LogFactory.getLog(TestRegionSizeUse.class);
+  private static final int SIZE_PER_VALUE = 256;
+  private static final int NUM_SPLITS = 10;
+  private static final String F1 = "f1";
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private MiniHBaseCluster cluster;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 1000);
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 1000);
+    conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+    cluster = TEST_UTIL.startMiniCluster(2);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testBasicRegionSizeReports() throws Exception {
+    final long bytesWritten = 5L * 1024L * 1024L; // 5MB
+    final TableName tn = writeData(bytesWritten);
+    LOG.debug("Data was written to HBase");
+    final Admin admin = TEST_UTIL.getAdmin();
+    // Push the data to disk.
+    admin.flush(tn);
+    LOG.debug("Data flushed to disk");
+    // Get the final region distribution
+    final List<HRegionInfo> regions = TEST_UTIL.getAdmin().getTableRegions(tn);
+
+    HMaster master = cluster.getMaster();
+    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
+    Map<HRegionInfo, Long> regionSizes = quotaManager.snapshotRegionSizes();
+    // Wait until we get all of the region reports for our table.
+    // The table may split, so make sure we have at least as many as expected right after we
+    // finished writing the data.
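+    // Region sizes arrive asynchronously via each RegionServer's FileSystemUtilizationChore
+    // (configured in setUp to run every second), so poll the Master's snapshot until it catches up.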
Saw " + observedRegions + + " region sizes reported, expected at least " + regions.size()); + Thread.sleep(1000); + regionSizes = quotaManager.snapshotRegionSizes(); + observedRegions = numRegionsForTable(tn, regionSizes); + } + + LOG.debug("Observed region sizes by the HMaster: " + regionSizes); + long totalRegionSize = 0L; + for (Long regionSize : regionSizes.values()) { + totalRegionSize += regionSize; + } + assertTrue("Expected region size report to exceed " + bytesWritten + ", but was " + + totalRegionSize + ". RegionSizes=" + regionSizes, bytesWritten < totalRegionSize); + } + + /** + * Writes at least {@code sizeInBytes} bytes of data to HBase and returns the TableName used. + * + * @param sizeInBytes The amount of data to write in bytes. + * @return The table the data was written to + */ + private TableName writeData(long sizeInBytes) throws IOException { + final Connection conn = TEST_UTIL.getConnection(); + final Admin admin = TEST_UTIL.getAdmin(); + final TableName tn = TableName.valueOf(testName.getMethodName()); + + // Delete the old table + if (admin.tableExists(tn)) { + admin.disableTable(tn); + admin.deleteTable(tn); + } + + // Create the table + HTableDescriptor tableDesc = new HTableDescriptor(tn); + tableDesc.addFamily(new HColumnDescriptor(F1)); + admin.createTable(tableDesc, Bytes.toBytes("1"), Bytes.toBytes("9"), NUM_SPLITS); + + final Table table = conn.getTable(tn); + try { + List updates = new ArrayList<>(); + long bytesToWrite = sizeInBytes; + long rowKeyId = 0L; + final StringBuilder sb = new StringBuilder(); + final Random r = new Random(); + while (bytesToWrite > 0L) { + sb.setLength(0); + sb.append(Long.toString(rowKeyId)); + // Use the reverse counter as the rowKey to get even spread across all regions + Put p = new Put(Bytes.toBytes(sb.reverse().toString())); + byte[] value = new byte[SIZE_PER_VALUE]; + r.nextBytes(value); + p.addColumn(Bytes.toBytes(F1), Bytes.toBytes("q1"), value); + updates.add(p); + + // Batch 50K worth of updates + if (updates.size() > 50) { + table.put(updates); + updates.clear(); + } + + // Just count the value size, ignore the size of rowkey + column + bytesToWrite -= SIZE_PER_VALUE; + rowKeyId++; + } + + // Write the final batch + if (!updates.isEmpty()) { + table.put(updates); + } + + return tn; + } finally { + table.close(); + } + } + + /** + * Computes the number of regions for the given table that have a positive size. + * + * @param tn The TableName in question + * @param regions A collection of region sizes + * @return The number of regions for the given table. + */ + private int numRegionsForTable(TableName tn, Map regions) { + int sum = 0; + for (Entry entry : regions.entrySet()) { + if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) { + sum++; + } + } + return sum; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java new file mode 100644 index 0000000..3244681 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.mock;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test class for isolated (non-cluster) tests surrounding the report
+ * of Region space use to the Master by RegionServers.
+ */
+@Category(SmallTests.class)
+public class TestRegionServerRegionSpaceUseReport {
+
+  @Test
+  public void testConversion() {
+    TableName tn = TableName.valueOf("table1");
+    HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    HRegionInfo hri2 = new HRegionInfo(tn, Bytes.toBytes("b"), Bytes.toBytes("c"));
+    HRegionInfo hri3 = new HRegionInfo(tn, Bytes.toBytes("c"), Bytes.toBytes("d"));
+    Map<HRegionInfo, Long> sizes = new HashMap<>();
+    sizes.put(hri1, 1024L * 1024L);
+    sizes.put(hri2, 1024L * 1024L * 8L);
+    sizes.put(hri3, 1024L * 1024L * 32L);
+
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    RegionSpaceUseReportRequest requests = rs.buildRegionSpaceUseReportRequest(sizes);
+    assertEquals(sizes.size(), requests.getSpaceUseCount());
+    for (RegionSpaceUse spaceUse : requests.getSpaceUseList()) {
+      RegionInfo ri = spaceUse.getRegion();
+      HRegionInfo hri = HRegionInfo.convert(ri);
+      Long expectedSize = sizes.remove(hri);
+      assertNotNull("Could not find size for HRI: " + hri, expectedSize);
+      assertEquals(expectedSize.longValue(), spaceUse.getSize());
+    }
+    assertTrue("Should not have any space use entries left: " + sizes, sizes.isEmpty());
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testNullMap() {
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    rs.buildRegionSpaceUseReportRequest(null);
+  }
+
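+  // A size map containing a null value should likewise fail fast with a NullPointerException.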
+  @Test(expected = NullPointerException.class)
+  public void testMalformedMap() {
+    TableName tn = TableName.valueOf("table1");
+    HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    Map<HRegionInfo, Long> sizes = new HashMap<>();
+    sizes.put(hri1, null);
+
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    rs.buildRegionSpaceUseReportRequest(sizes);
+  }
+}
-- 
2.10.2