commit e544da2874cefcc6835c2caf957b6fdfce7b496e
Author: thiruvel
Date:   Thu Jul 7 13:06:21 2016 -0700

    HBASE-16169: RegionSizeCalculator should not depend on master

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 51a26bc..6461870 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
@@ -957,6 +958,18 @@ public interface Admin extends Abortable, Closeable {
   ClusterStatus getClusterStatus() throws IOException;
 
   /**
+   * @return region load map of all regions hosted on a region server
+   * @throws IOException if a remote or network exception occurs
+   */
+  Map<byte[], RegionLoad> getRegionLoad(ServerName sn) throws IOException;
+
+  /**
+   * @return region load map of all regions of a table hosted on a region server
+   * @throws IOException if a remote or network exception occurs
+   */
+  Map<byte[], RegionLoad> getRegionLoad(ServerName sn, TableName tableName) throws IOException;
+
+  /**
    * @return Configuration used by the instance.
    */
   Configuration getConfiguration();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index da0de51..483735e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -32,6 +32,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.TreeMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ProcedureUtil;
+import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
@@ -1854,6 +1856,24 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
+  public Map<byte[], RegionLoad> getRegionLoad(final ServerName sn) throws IOException {
+    return getRegionLoad(sn, null);
+  }
+
+  @Override
+  public Map<byte[], RegionLoad> getRegionLoad(final ServerName sn, final TableName tableName)
+      throws IOException {
+    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+    PayloadCarryingRpcController controller = rpcControllerFactory.newController();
+    List<RegionLoad> regionLoads = ProtobufUtil.getRegionLoad(controller, admin, tableName);
+    Map<byte[], RegionLoad> resultMap = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
+    for (RegionLoad regionLoad : regionLoads) {
+      resultMap.put(regionLoad.getName(), regionLoad);
+    }
+    return resultMap;
+  }
+
+  @Override
   public Configuration getConfiguration() {
     return this.conf;
   }
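The two Admin methods above are what let RegionSizeCalculator (or any other client) collect per-region load directly from each regionserver instead of fetching ClusterStatus from the master. A minimal usage sketch follows; it is illustrative only: the plain HBaseConfiguration, the table name "t1", the null-server guard, and the MB-to-bytes arithmetic are assumptions, not part of this patch.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionSizeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("t1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Which regionservers currently host this table's regions?
      Set<ServerName> servers = new HashSet<ServerName>();
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        if (loc.getServerName() != null) {
          servers.add(loc.getServerName());
        }
      }
      // Ask each regionserver directly; the master is never contacted.
      Map<byte[], Long> sizes = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
      for (ServerName sn : servers) {
        for (RegionLoad rl : admin.getRegionLoad(sn, table).values()) {
          sizes.put(rl.getName(), rl.getStorefileSizeMB() * 1024L * 1024L);
        }
      }
      System.out.println(sizes.size() + " regions sized for " + table);
    }
  }
}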
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index fecc3c2..da2caa1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -94,6 +94,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionReq
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
@@ -1802,6 +1804,29 @@ public final class ProtobufUtil {
     return getRegionInfos(response);
   }
 
+  public static List<org.apache.hadoop.hbase.RegionLoad> getRegionLoad(
+      final RpcController controller, final AdminService.BlockingInterface admin,
+      final TableName tableName) throws IOException {
+    GetRegionLoadRequest request = RequestConverter.buildGetRegionLoadRequest(tableName);
+    GetRegionLoadResponse response;
+    try {
+      response = admin.getRegionLoad(controller, request);
+    } catch (ServiceException se) {
+      throw getRemoteException(se);
+    }
+    return getRegionLoadInfo(response);
+  }
+
+  static List<org.apache.hadoop.hbase.RegionLoad> getRegionLoadInfo(
+      GetRegionLoadResponse regionLoadResponse) {
+    List<org.apache.hadoop.hbase.RegionLoad> regionLoadList =
+        new ArrayList<>(regionLoadResponse.getRegionLoadsCount());
+    for (RegionLoad regionLoad : regionLoadResponse.getRegionLoadsList()) {
+      regionLoadList.add(new org.apache.hadoop.hbase.RegionLoad(regionLoad));
+    }
+    return regionLoadList;
+  }
+
   /**
    * Get the list of region info from a GetOnlineRegionResponse
    *
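Over the wire, ProtobufUtil.getRegionLoad above sends a GetRegionLoadRequest whose single optional field is a table name, and converts the repeated region_loads field of the response. A hedged sketch of the two request shapes built with the generated API; the table name "t1" is an assumed example, and RequestConverter.buildGetRegionLoadRequest in the next hunk performs exactly this optional-field handling.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest;

public class GetRegionLoadRequestShapes {
  public static void main(String[] args) {
    // table_name unset: the regionserver reports load for all regions it hosts.
    GetRegionLoadRequest allRegions = GetRegionLoadRequest.newBuilder().build();

    // table_name set: the report is restricted to one table's regions.
    GetRegionLoadRequest oneTable = GetRegionLoadRequest.newBuilder()
        .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))
        .build();

    System.out.println(allRegions.hasTableName() + " " + oneTable.hasTableName());
  }
}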
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index ce01e1e..a3ac53c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionReque
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
@@ -704,7 +705,21 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  /**
+  /**
+   * Create a protocol buffer GetRegionLoadRequest for all regions/regions of a table.
+   *
+   * @param tableName the table for which regionLoad should be obtained from RS
+   * @return a protocol buffer GetRegionLoadRequest
+   */
+  public static GetRegionLoadRequest buildGetRegionLoadRequest(final TableName tableName) {
+    GetRegionLoadRequest.Builder builder = GetRegionLoadRequest.newBuilder();
+    if (tableName != null) {
+      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+    }
+    return builder.build();
+  }
+
+  /**
    * Create a protocol buffer GetStoreFileRequest for a given region name
    *
    * @param regionName the name of the region to get info
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
index 1c59ea6..b9002f8 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
@@ -22420,6 +22420,1396 @@ public final class AdminProtos {
   // @@protoc_insertion_point(class_scope:hbase.pb.UpdateConfigurationResponse)
 }
 
+  public interface GetRegionLoadRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional .hbase.pb.TableName table_name = 1;
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    boolean hasTableName();
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetRegionLoadRequest}
+   */
+  public static final class GetRegionLoadRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements GetRegionLoadRequestOrBuilder {
+    // Use GetRegionLoadRequest.newBuilder() to construct.
+ private GetRegionLoadRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetRegionLoadRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetRegionLoadRequest defaultInstance; + public static GetRegionLoadRequest getDefaultInstance() { + return defaultInstance; + } + + public GetRegionLoadRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetRegionLoadRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetRegionLoadRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetRegionLoadRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; 
+ /** + * optional .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRegionLoadRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.Builder.class); + } + + // 
Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return 
tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRegionLoadRequest) + } + + static { + defaultInstance = new GetRegionLoadRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRegionLoadRequest) + } + + public interface GetRegionLoadResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.RegionLoad region_loads = 1; + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + java.util.List + getRegionLoadsList(); + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index); + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + int getRegionLoadsCount(); + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + java.util.List + getRegionLoadsOrBuilderList(); + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.GetRegionLoadResponse} + */ + public static final class GetRegionLoadResponse extends + com.google.protobuf.GeneratedMessage + implements GetRegionLoadResponseOrBuilder { + // Use GetRegionLoadResponse.newBuilder() to construct. + private GetRegionLoadResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetRegionLoadResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetRegionLoadResponse defaultInstance; + public static GetRegionLoadResponse getDefaultInstance() { + return defaultInstance; + } + + public GetRegionLoadResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetRegionLoadResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + regionLoads_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + regionLoads_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetRegionLoadResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new 
GetRegionLoadResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.RegionLoad region_loads = 1; + public static final int REGION_LOADS_FIELD_NUMBER = 1; + private java.util.List regionLoads_; + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + public java.util.List getRegionLoadsList() { + return regionLoads_; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + public java.util.List + getRegionLoadsOrBuilderList() { + return regionLoads_; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + public int getRegionLoadsCount() { + return regionLoads_.size(); + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) { + return regionLoads_.get(index); + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+     * <pre>
+     ** Information on the load of individual regions.
+     * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index) { + return regionLoads_.get(index); + } + + private void initFields() { + regionLoads_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getRegionLoadsCount(); i++) { + if (!getRegionLoads(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < regionLoads_.size(); i++) { + output.writeMessage(1, regionLoads_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < regionLoads_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, regionLoads_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse) obj; + + boolean result = true; + result = result && getRegionLoadsList() + .equals(other.getRegionLoadsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getRegionLoadsCount() > 0) { + hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER; + hash = (53 * hash) + getRegionLoadsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRegionLoadResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionLoadsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionLoadsBuilder_ == null) { + regionLoads_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + regionLoadsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse(this); + int from_bitField0_ = bitField0_; + if (regionLoadsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.regionLoads_ = regionLoads_; + } else { + result.regionLoads_ = regionLoadsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()) return this; + if (regionLoadsBuilder_ == null) { + if (!other.regionLoads_.isEmpty()) { + if (regionLoads_.isEmpty()) { + regionLoads_ = other.regionLoads_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRegionLoadsIsMutable(); + regionLoads_.addAll(other.regionLoads_); + } + onChanged(); + } + } else { + if (!other.regionLoads_.isEmpty()) { + if (regionLoadsBuilder_.isEmpty()) { + regionLoadsBuilder_.dispose(); + regionLoadsBuilder_ = null; + regionLoads_ = other.regionLoads_; + bitField0_ = (bitField0_ & ~0x00000001); + regionLoadsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getRegionLoadsFieldBuilder() : null; + } else { + regionLoadsBuilder_.addAllMessages(other.regionLoads_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getRegionLoadsCount(); i++) { + if (!getRegionLoads(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.RegionLoad region_loads = 1; + private java.util.List regionLoads_ = + java.util.Collections.emptyList(); + private void ensureRegionLoadsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + regionLoads_ = new java.util.ArrayList(regionLoads_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> regionLoadsBuilder_; + + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public java.util.List getRegionLoadsList() { + if (regionLoadsBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionLoads_); + } else { + return regionLoadsBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public int getRegionLoadsCount() { + if (regionLoadsBuilder_ == null) { + return regionLoads_.size(); + } else { + return regionLoadsBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) { + if (regionLoadsBuilder_ == null) { + return regionLoads_.get(index); + } else { + return regionLoadsBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder setRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionLoadsIsMutable(); + regionLoads_.set(index, value); + onChanged(); + } else { + regionLoadsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder setRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.set(index, builderForValue.build()); + onChanged(); + } else { + regionLoadsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder addRegionLoads(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionLoadsIsMutable(); + regionLoads_.add(value); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder addRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionLoadsIsMutable(); + regionLoads_.add(index, value); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder addRegionLoads( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.add(builderForValue.build()); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder addRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.add(index, builderForValue.build()); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder addAllRegionLoads( + java.lang.Iterable values) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + super.addAll(values, regionLoads_); + onChanged(); + } else { + regionLoadsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder clearRegionLoads() { + if (regionLoadsBuilder_ == null) { + regionLoads_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + regionLoadsBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public Builder removeRegionLoads(int index) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.remove(index); + onChanged(); + } else { + regionLoadsBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder getRegionLoadsBuilder( + int index) { + return getRegionLoadsFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index) { + if (regionLoadsBuilder_ == null) { + return regionLoads_.get(index); } else { + return regionLoadsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public java.util.List + getRegionLoadsOrBuilderList() { + if (regionLoadsBuilder_ != null) { + return regionLoadsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionLoads_); + } + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder() { + return getRegionLoadsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder( + int index) { + return getRegionLoadsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionLoad region_loads = 1; + * + *
+       * <pre>
+       ** Information on the load of individual regions.
+       * </pre>
+ */ + public java.util.List + getRegionLoadsBuilderList() { + return getRegionLoadsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> + getRegionLoadsFieldBuilder() { + if (regionLoadsBuilder_ == null) { + regionLoadsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>( + regionLoads_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + regionLoads_ = null; + } + return regionLoadsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRegionLoadResponse) + } + + static { + defaultInstance = new GetRegionLoadResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRegionLoadResponse) + } + /** * Protobuf service {@code hbase.pb.AdminService} */ @@ -22437,6 +23827,14 @@ public final class AdminProtos { com.google.protobuf.RpcCallback done); /** + * rpc GetRegionLoad(.hbase.pb.GetRegionLoadRequest) returns (.hbase.pb.GetRegionLoadResponse); + */ + public abstract void getRegionLoad( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest request, + com.google.protobuf.RpcCallback done); + + /** * rpc GetStoreFile(.hbase.pb.GetStoreFileRequest) returns (.hbase.pb.GetStoreFileResponse); */ public abstract void getStoreFile( @@ -22578,6 +23976,14 @@ public final class AdminProtos { } @java.lang.Override + public void getRegionLoad( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest request, + com.google.protobuf.RpcCallback done) { + impl.getRegionLoad(controller, request, done); + } + + @java.lang.Override public void getStoreFile( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request, @@ -22730,36 +24136,38 @@ public final class AdminProtos { case 0: return impl.getRegionInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest)request); case 1: - return impl.getStoreFile(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest)request); + return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest)request); case 2: - return impl.getOnlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest)request); + return impl.getStoreFile(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest)request); case 3: - return impl.openRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest)request); + return impl.getOnlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest)request); case 4: - return impl.warmupRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)request); + return impl.openRegion(controller, 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest)request); case 5: - return impl.closeRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest)request); + return impl.warmupRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)request); case 6: - return impl.flushRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest)request); + return impl.closeRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest)request); case 7: - return impl.splitRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest)request); + return impl.flushRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest)request); case 8: - return impl.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request); + return impl.splitRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest)request); case 9: - return impl.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request); + return impl.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request); case 10: - return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request); case 11: - return impl.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 12: - return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request); + return impl.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 13: - return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request); + return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request); case 14: - return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request); + return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request); case 15: - return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); + return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request); case 16: + return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); + case 17: return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -22778,36 +24186,38 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.getDefaultInstance(); case 1: - return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 16: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + case 17: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -22826,36 +24236,38 @@ public final class 
AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 16: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + case 17: return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -22874,6 +24286,14 @@ public final class AdminProtos { com.google.protobuf.RpcCallback done); /** + * rpc GetRegionLoad(.hbase.pb.GetRegionLoadRequest) returns (.hbase.pb.GetRegionLoadResponse); + */ + public abstract void getRegionLoad( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest request, + com.google.protobuf.RpcCallback done); + + /** * rpc GetStoreFile(.hbase.pb.GetStoreFileRequest) returns (.hbase.pb.GetStoreFileResponse); */ public abstract void getStoreFile( @@ -23029,81 +24449,86 @@ public final class AdminProtos { done)); return; case 1: + this.getRegionLoad(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: this.getStoreFile(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 2: + case 3: this.getOnlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 3: + case 4: this.openRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 4: + case 5: this.warmupRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 5: + case 6: this.closeRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 6: + case 7: this.flushRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 7: + case 8: this.splitRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 8: + case 9: this.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 9: + case 10: this.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 10: + case 11: this.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 11: + case 12: this.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 12: + case 13: this.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 13: + case 14: this.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request, 
com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 14: + case 15: this.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 15: + case 16: this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 16: + case 17: this.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -23125,36 +24550,38 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 16: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + case 17: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -23173,36 +24600,38 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 16: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + case 17: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -23240,12 +24669,27 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance())); } + public void getRegionLoad( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance())); + } + public void getStoreFile( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(1), + getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(), @@ -23260,7 +24704,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(2), + getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(), @@ -23275,7 +24719,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(3), + getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(), @@ -23290,7 +24734,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(), @@ -23305,7 +24749,7 @@ public final class 
AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(), @@ -23320,7 +24764,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(), @@ -23335,7 +24779,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(), @@ -23350,7 +24794,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(), @@ -23365,7 +24809,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(), @@ -23380,7 +24824,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -23395,7 +24839,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -23410,7 +24854,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(), @@ -23425,7 +24869,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(), @@ -23440,7 +24884,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(), @@ -23455,7 +24899,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(), @@ -23470,7 +24914,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(), @@ -23492,6 +24936,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest request) throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse getRegionLoad( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest request) + throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse getStoreFile( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request) @@ -23592,12 +25041,24 @@ public final class AdminProtos { } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse getRegionLoad( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse getStoreFile( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), + getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance()); @@ -23609,7 +25070,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse) channel.callBlockingMethod( - 
getDescriptor().getMethods().get(2), + getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance()); @@ -23621,7 +25082,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), + getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance()); @@ -23633,7 +25094,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance()); @@ -23645,7 +25106,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance()); @@ -23657,7 +25118,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance()); @@ -23669,7 +25130,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance()); @@ -23681,7 +25142,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance()); @@ -23693,7 +25154,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), 
controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()); @@ -23705,7 +25166,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -23717,7 +25178,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -23729,7 +25190,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance()); @@ -23741,7 +25202,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance()); @@ -23753,7 +25214,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance()); @@ -23765,7 +25226,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()); @@ -23777,7 +25238,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + 
getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); @@ -23968,6 +25429,16 @@ public final class AdminProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRegionLoadRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRegionLoadResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -23977,118 +25448,124 @@ public final class AdminProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\013Admin.proto\022\010hbase.pb\032\014Client.proto\032\013H" + - "Base.proto\032\tWAL.proto\"[\n\024GetRegionInfoRe" + - "quest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionS" + - "pecifier\022\030\n\020compaction_state\030\002 \001(\010\"\353\001\n\025G" + - "etRegionInfoResponse\022)\n\013region_info\030\001 \002(" + - "\0132\024.hbase.pb.RegionInfo\022I\n\020compaction_st" + - "ate\030\002 \001(\0162/.hbase.pb.GetRegionInfoRespon" + - "se.CompactionState\022\024\n\014isRecovering\030\003 \001(\010" + - "\"F\n\017CompactionState\022\010\n\004NONE\020\000\022\t\n\005MINOR\020\001" + - "\022\t\n\005MAJOR\020\002\022\023\n\017MAJOR_AND_MINOR\020\003\"P\n\023GetS", - "toreFileRequest\022)\n\006region\030\001 \002(\0132\031.hbase." 
+ - "pb.RegionSpecifier\022\016\n\006family\030\002 \003(\014\"*\n\024Ge" + - "tStoreFileResponse\022\022\n\nstore_file\030\001 \003(\t\"\030" + - "\n\026GetOnlineRegionRequest\"D\n\027GetOnlineReg" + - "ionResponse\022)\n\013region_info\030\001 \003(\0132\024.hbase" + - ".pb.RegionInfo\"\263\002\n\021OpenRegionRequest\022=\n\t" + - "open_info\030\001 \003(\0132*.hbase.pb.OpenRegionReq" + - "uest.RegionOpenInfo\022\027\n\017serverStartCode\030\002" + - " \001(\004\022\032\n\022master_system_time\030\005 \001(\004\032\251\001\n\016Reg" + - "ionOpenInfo\022$\n\006region\030\001 \002(\0132\024.hbase.pb.R", - "egionInfo\022\037\n\027version_of_offline_node\030\002 \001" + - "(\r\022+\n\rfavored_nodes\030\003 \003(\0132\024.hbase.pb.Ser" + - "verName\022#\n\033openForDistributedLogReplay\030\004" + - " \001(\010\"\246\001\n\022OpenRegionResponse\022F\n\ropening_s" + - "tate\030\001 \003(\0162/.hbase.pb.OpenRegionResponse" + - ".RegionOpeningState\"H\n\022RegionOpeningStat" + - "e\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016FAI" + - "LED_OPENING\020\002\"?\n\023WarmupRegionRequest\022(\n\n" + - "regionInfo\030\001 \002(\0132\024.hbase.pb.RegionInfo\"\026" + - "\n\024WarmupRegionResponse\"\313\001\n\022CloseRegionRe", - "quest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionS" + - "pecifier\022\037\n\027version_of_closing_node\030\002 \001(" + - "\r\022\036\n\020transition_in_ZK\030\003 \001(\010:\004true\0220\n\022des" + - "tination_server\030\004 \001(\0132\024.hbase.pb.ServerN" + - "ame\022\027\n\017serverStartCode\030\005 \001(\004\"%\n\023CloseReg" + - "ionResponse\022\016\n\006closed\030\001 \002(\010\"y\n\022FlushRegi" + - "onRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Reg" + - "ionSpecifier\022\030\n\020if_older_than_ts\030\002 \001(\004\022\036" + - "\n\026write_flush_wal_marker\030\003 \001(\010\"_\n\023FlushR" + - "egionResponse\022\027\n\017last_flush_time\030\001 \002(\004\022\017", - "\n\007flushed\030\002 \001(\010\022\036\n\026wrote_flush_wal_marke" + - "r\030\003 \001(\010\"T\n\022SplitRegionRequest\022)\n\006region\030" + - "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\023\n\013spli" + - "t_point\030\002 \001(\014\"\025\n\023SplitRegionResponse\"`\n\024" + - "CompactRegionRequest\022)\n\006region\030\001 \002(\0132\031.h" + - "base.pb.RegionSpecifier\022\r\n\005major\030\002 \001(\010\022\016" + - "\n\006family\030\003 \001(\014\"\027\n\025CompactRegionResponse\"" + - "\315\001\n\031UpdateFavoredNodesRequest\022I\n\013update_" + - "info\030\001 \003(\01324.hbase.pb.UpdateFavoredNodes" + - "Request.RegionUpdateInfo\032e\n\020RegionUpdate", - "Info\022$\n\006region\030\001 \002(\0132\024.hbase.pb.RegionIn" + - "fo\022+\n\rfavored_nodes\030\002 \003(\0132\024.hbase.pb.Ser" + - "verName\".\n\032UpdateFavoredNodesResponse\022\020\n" + - "\010response\030\001 \001(\r\"\244\001\n\023MergeRegionsRequest\022" + - "+\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpeci" + - "fier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.Region" + - "Specifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\032\n\022ma" + - "ster_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRe" + - "sponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase." 
+ - "pb.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025as", - "sociated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateW" + - "ALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb" + - ".WALEntry\022\034\n\024replicationClusterId\030\002 \001(\t\022" + - "\"\n\032sourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031s" + - "ourceHFileArchiveDirPath\030\004 \001(\t\"\033\n\031Replic" + - "ateWALEntryResponse\"\026\n\024RollWALWriterRequ" + - "est\"0\n\025RollWALWriterResponse\022\027\n\017region_t" + - "o_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006re" + - "ason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetS" + - "erverInfoRequest\"K\n\nServerInfo\022)\n\013server", - "_name\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nweb" + - "ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" + - "\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" + - "o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" + - "eConfigurationResponse2\207\013\n\014AdminService\022" + - "P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" + - "oRequest\032\037.hbase.pb.GetRegionInfoRespons" + - "e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" + - "eRequest\032\036.hbase.pb.GetStoreFileResponse" + - "\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline", - "RegionRequest\032!.hbase.pb.GetOnlineRegion" + - "Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" + - "gionRequest\032\034.hbase.pb.OpenRegionRespons" + - "e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" + - "nRequest\032\036.hbase.pb.WarmupRegionResponse" + - "\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" + - "quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" + - "FlushRegion\022\034.hbase.pb.FlushRegionReques" + - "t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" + - "tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.", - "hbase.pb.SplitRegionResponse\022P\n\rCompactR" + - "egion\022\036.hbase.pb.CompactRegionRequest\032\037." + - "hbase.pb.CompactRegionResponse\022M\n\014MergeR" + - "egions\022\035.hbase.pb.MergeRegionsRequest\032\036." 
+ - "hbase.pb.MergeRegionsResponse\022\\\n\021Replica" + - "teWALEntry\022\".hbase.pb.ReplicateWALEntryR" + + "\n\013Admin.proto\022\010hbase.pb\032\014Client.proto\032\023C" + + "lusterStatus.proto\032\013HBase.proto\032\tWAL.pro" + + "to\"[\n\024GetRegionInfoRequest\022)\n\006region\030\001 \002" + + "(\0132\031.hbase.pb.RegionSpecifier\022\030\n\020compact" + + "ion_state\030\002 \001(\010\"\353\001\n\025GetRegionInfoRespons" + + "e\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Region" + + "Info\022I\n\020compaction_state\030\002 \001(\0162/.hbase.p" + + "b.GetRegionInfoResponse.CompactionState\022" + + "\024\n\014isRecovering\030\003 \001(\010\"F\n\017CompactionState" + + "\022\010\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJOR\020\002\022\023\n\017MAJO", + "R_AND_MINOR\020\003\"P\n\023GetStoreFileRequest\022)\n\006" + + "region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022" + + "\016\n\006family\030\002 \003(\014\"*\n\024GetStoreFileResponse\022" + + "\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetOnlineRegionRe" + + "quest\"D\n\027GetOnlineRegionResponse\022)\n\013regi" + + "on_info\030\001 \003(\0132\024.hbase.pb.RegionInfo\"\263\002\n\021" + + "OpenRegionRequest\022=\n\topen_info\030\001 \003(\0132*.h" + + "base.pb.OpenRegionRequest.RegionOpenInfo" + + "\022\027\n\017serverStartCode\030\002 \001(\004\022\032\n\022master_syst" + + "em_time\030\005 \001(\004\032\251\001\n\016RegionOpenInfo\022$\n\006regi", + "on\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\037\n\027versio" + + "n_of_offline_node\030\002 \001(\r\022+\n\rfavored_nodes" + + "\030\003 \003(\0132\024.hbase.pb.ServerName\022#\n\033openForD" + + "istributedLogReplay\030\004 \001(\010\"\246\001\n\022OpenRegion" + + "Response\022F\n\ropening_state\030\001 \003(\0162/.hbase." + + "pb.OpenRegionResponse.RegionOpeningState" + + "\"H\n\022RegionOpeningState\022\n\n\006OPENED\020\000\022\022\n\016AL" + + "READY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"?\n\023Wa" + + "rmupRegionRequest\022(\n\nregionInfo\030\001 \002(\0132\024." + + "hbase.pb.RegionInfo\"\026\n\024WarmupRegionRespo", + "nse\"\313\001\n\022CloseRegionRequest\022)\n\006region\030\001 \002" + + "(\0132\031.hbase.pb.RegionSpecifier\022\037\n\027version" + + "_of_closing_node\030\002 \001(\r\022\036\n\020transition_in_" + + "ZK\030\003 \001(\010:\004true\0220\n\022destination_server\030\004 \001" + + "(\0132\024.hbase.pb.ServerName\022\027\n\017serverStartC" + + "ode\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016\n\006clos" + + "ed\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006region" + + "\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\030\n\020if_" + + "older_than_ts\030\002 \001(\004\022\036\n\026write_flush_wal_m" + + "arker\030\003 \001(\010\"_\n\023FlushRegionResponse\022\027\n\017la", + "st_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026" + + "wrote_flush_wal_marker\030\003 \001(\010\"T\n\022SplitReg" + + "ionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + + "gionSpecifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023Sp" + + "litRegionResponse\"`\n\024CompactRegionReques" + + "t\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" + + "fier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025C" + + "ompactRegionResponse\"\315\001\n\031UpdateFavoredNo" + + "desRequest\022I\n\013update_info\030\001 \003(\01324.hbase." 
+ + "pb.UpdateFavoredNodesRequest.RegionUpdat", + "eInfo\032e\n\020RegionUpdateInfo\022$\n\006region\030\001 \002(" + + "\0132\024.hbase.pb.RegionInfo\022+\n\rfavored_nodes" + + "\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032UpdateFa" + + "voredNodesResponse\022\020\n\010response\030\001 \001(\r\"\244\001\n" + + "\023MergeRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031" + + ".hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 " + + "\002(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcib" + + "le\030\003 \001(\010:\005false\022\032\n\022master_system_time\030\004 " + + "\001(\004\"\026\n\024MergeRegionsResponse\"a\n\010WALEntry\022" + + "\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n\017key_va", + "lue_bytes\030\002 \003(\014\022\035\n\025associated_cell_count" + + "\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryRequest\022!\n\005e" + + "ntry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n\024replic" + + "ationClusterId\030\002 \001(\t\022\"\n\032sourceBaseNamesp" + + "aceDirPath\030\003 \001(\t\022!\n\031sourceHFileArchiveDi" + + "rPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryResponse" + + "\"\026\n\024RollWALWriterRequest\"0\n\025RollWALWrite" + + "rResponse\022\027\n\017region_to_flush\030\001 \003(\014\"#\n\021St" + + "opServerRequest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopS" + + "erverResponse\"\026\n\024GetServerInfoRequest\"K\n", + "\nServerInfo\022)\n\013server_name\030\001 \002(\0132\024.hbase" + + ".pb.ServerName\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025Ge" + + "tServerInfoResponse\022)\n\013server_info\030\001 \002(\013" + + "2\024.hbase.pb.ServerInfo\"\034\n\032UpdateConfigur" + + "ationRequest\"\035\n\033UpdateConfigurationRespo" + + "nse\"?\n\024GetRegionLoadRequest\022\'\n\ntable_nam" + + "e\030\001 \001(\0132\023.hbase.pb.TableName\"C\n\025GetRegio" + + "nLoadResponse\022*\n\014region_loads\030\001 \003(\0132\024.hb" + + "ase.pb.RegionLoad2\331\013\n\014AdminService\022P\n\rGe" + + "tRegionInfo\022\036.hbase.pb.GetRegionInfoRequ", + "est\032\037.hbase.pb.GetRegionInfoResponse\022P\n\r" + + "GetRegionLoad\022\036.hbase.pb.GetRegionLoadRe" + + "quest\032\037.hbase.pb.GetRegionLoadResponse\022M" + + "\n\014GetStoreFile\022\035.hbase.pb.GetStoreFileRe" + + "quest\032\036.hbase.pb.GetStoreFileResponse\022V\n" + + "\017GetOnlineRegion\022 .hbase.pb.GetOnlineReg" + + "ionRequest\032!.hbase.pb.GetOnlineRegionRes" + + "ponse\022G\n\nOpenRegion\022\033.hbase.pb.OpenRegio" + + "nRequest\032\034.hbase.pb.OpenRegionResponse\022M" + + "\n\014WarmupRegion\022\035.hbase.pb.WarmupRegionRe", + "quest\032\036.hbase.pb.WarmupRegionResponse\022J\n" + + "\013CloseRegion\022\034.hbase.pb.CloseRegionReque" + + "st\032\035.hbase.pb.CloseRegionResponse\022J\n\013Flu" + + "shRegion\022\034.hbase.pb.FlushRegionRequest\032\035" + + ".hbase.pb.FlushRegionResponse\022J\n\013SplitRe" + + "gion\022\034.hbase.pb.SplitRegionRequest\032\035.hba" + + "se.pb.SplitRegionResponse\022P\n\rCompactRegi" + + "on\022\036.hbase.pb.CompactRegionRequest\032\037.hba" + + "se.pb.CompactRegionResponse\022M\n\014MergeRegi" + + "ons\022\035.hbase.pb.MergeRegionsRequest\032\036.hba", + "se.pb.MergeRegionsResponse\022\\\n\021ReplicateW" + + "ALEntry\022\".hbase.pb.ReplicateWALEntryRequ" + + "est\032#.hbase.pb.ReplicateWALEntryResponse" + + "\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEntryR" + "equest\032#.hbase.pb.ReplicateWALEntryRespo" + - "nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" + - "ryRequest\032#.hbase.pb.ReplicateWALEntryRe" + - 
"sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW", - "ALWriterRequest\032\037.hbase.pb.RollWALWriter" + - "Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" + - "ServerInfoRequest\032\037.hbase.pb.GetServerIn" + - "foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" + - "ServerRequest\032\034.hbase.pb.StopServerRespo" + - "nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" + - "ateFavoredNodesRequest\032$.hbase.pb.Update" + - "FavoredNodesResponse\022b\n\023UpdateConfigurat" + - "ion\022$.hbase.pb.UpdateConfigurationReques" + - "t\032%.hbase.pb.UpdateConfigurationResponse", - "BA\n*org.apache.hadoop.hbase.protobuf.gen" + - "eratedB\013AdminProtosH\001\210\001\001\240\001\001" + "nse\022P\n\rRollWALWriter\022\036.hbase.pb.RollWALW" + + "riterRequest\032\037.hbase.pb.RollWALWriterRes" + + "ponse\022P\n\rGetServerInfo\022\036.hbase.pb.GetSer" + + "verInfoRequest\032\037.hbase.pb.GetServerInfoR" + + "esponse\022G\n\nStopServer\022\033.hbase.pb.StopSer", + "verRequest\032\034.hbase.pb.StopServerResponse" + + "\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Update" + + "FavoredNodesRequest\032$.hbase.pb.UpdateFav" + + "oredNodesResponse\022b\n\023UpdateConfiguration" + + "\022$.hbase.pb.UpdateConfigurationRequest\032%" + + ".hbase.pb.UpdateConfigurationResponseBA\n" + + "*org.apache.hadoop.hbase.protobuf.genera" + + "tedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -24311,6 +25788,18 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationResponse_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_GetRegionLoadRequest_descriptor = + getDescriptor().getMessageTypes().get(34); + internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRegionLoadRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_hbase_pb_GetRegionLoadResponse_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRegionLoadResponse_descriptor, + new java.lang.String[] { "RegionLoads", }); return null; } }; @@ -24318,6 +25807,7 @@ public final class AdminProtos { .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.WALProtos.getDescriptor(), }, assigner); diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index a1905a4..5c58827 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -26,6 +26,7 @@ option java_generate_equals_and_hash = true; option optimize_for = SPEED; import "Client.proto"; +import "ClusterStatus.proto"; import "HBase.proto"; import "WAL.proto"; @@ -256,10 +257,21 @@ message UpdateConfigurationRequest { message UpdateConfigurationResponse { } +message GetRegionLoadRequest { + optional TableName table_name = 1; +} + +message 
GetRegionLoadResponse {
+  repeated RegionLoad region_loads = 1;
+}
+
 service AdminService {
   rpc GetRegionInfo(GetRegionInfoRequest)
     returns(GetRegionInfoResponse);

+  rpc GetRegionLoad(GetRegionLoadRequest)
+    returns(GetRegionLoadResponse);
+
   rpc GetStoreFile(GetStoreFileRequest)
     returns(GetStoreFileResponse);
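The two new messages and the service entry above complete the wire protocol for this feature. As a quick orientation, a minimal client-side sketch against the Admin additions from earlier in this commit; the table name "myTable" and the printing are illustrative only, and this snippet is not part of the patch.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegionLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("myTable"); // hypothetical table
      for (ServerName server : admin.getClusterStatus().getServers()) {
        // New in this patch: the load map comes from the region server itself,
        // not from the master's ClusterStatus.
        Map<byte[], RegionLoad> loads = admin.getRegionLoad(server, table);
        for (RegionLoad load : loads.values()) {
          System.out.println(load.getNameAsString() + ": "
              + load.getStorefileSizeMB() + " MB in store files");
        }
      }
    }
  }
}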
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 877c3db..99ddf2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1472,7 +1472,7 @@ public class HRegionServer extends HasThread implements
    *
    * @throws IOException
    */
-  private RegionLoad createRegionLoad(final Region r, RegionLoad.Builder regionLoadBldr,
+  RegionLoad createRegionLoad(final Region r, RegionLoad.Builder regionLoadBldr,
       RegionSpecifier.Builder regionSpecifier) throws IOException {
     byte[] name = r.getRegionInfo().getRegionName();
     int stores = 0;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index c3626fd..53dd394 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -104,6 +104,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionReq
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
@@ -152,6 +154,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResul
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
@@ -1486,6 +1489,34 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
   }

+  @Override
+  @QosPriority(priority=HConstants.ADMIN_QOS)
+  public GetRegionLoadResponse getRegionLoad(RpcController controller,
+      GetRegionLoadRequest request) throws ServiceException {
+
+    List<Region> regions;
+    if (request.hasTableName()) {
+      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+      regions = regionServer.getOnlineRegions(tableName);
+    } else {
+      regions = regionServer.getOnlineRegions();
+    }
+    List<RegionLoad> rLoads = new ArrayList<RegionLoad>(regions.size());
+    RegionLoad.Builder regionLoadBuilder = RegionLoad.newBuilder();
+    RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
+
+    try {
+      for (Region region : regions) {
+        rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier));
+      }
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
+    builder.addAllRegionLoads(rLoads);
+    return builder.build();
+  }
+
   /**
    * Get some information of the region server.
    *
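The handler above answers for a single table when the optional table_name field is set, and for all online regions otherwise, reusing HRegionServer#createRegionLoad (which is why that method loses its private modifier in the preceding hunk). On the request side, clients go through RequestConverter.buildGetRegionLoadRequest, referenced from ProtobufUtil earlier in this commit. A plausible sketch of that helper, mirroring the converter's existing style; treat it as an illustration rather than a verbatim excerpt of the patch:

  /**
   * Create a protocol buffer GetRegionLoadRequest for all regions of a
   * region server, or only for the regions of the given table.
   *
   * @param tableName the table to restrict the report to, or null for all regions
   */
  public static GetRegionLoadRequest buildGetRegionLoadRequest(final TableName tableName) {
    GetRegionLoadRequest.Builder builder = GetRegionLoadRequest.newBuilder();
    if (tableName != null) {
      // table_name is optional in the proto; leaving it unset means "all regions".
      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
    }
    return builder.build();
  }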
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
index 86b06ac..c616a25 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
@@ -19,21 +19,18 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.TreeSet;
 
+import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -59,6 +56,7 @@ public class RegionSizeCalculator {
   private final Map<byte[], Long> sizeMap = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
 
   static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable";
+  private static final long MEGABYTE = 1024L * 1024L;
 
   /**
    * Computes size of each region for table and given column families.
@@ -95,38 +93,36 @@ public class RegionSizeCalculator {
 
     LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
 
-    //get regions for table
-    List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
-    Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    for (HRegionLocation regionInfo : tableRegionInfos) {
-      tableRegions.add(regionInfo.getRegionInfo().getRegionName());
-    }
-
-    ClusterStatus clusterStatus = admin.getClusterStatus();
-    Collection<ServerName> servers = clusterStatus.getServers();
-    final long megaByte = 1024L * 1024L;
+    // Get the servers which host regions of the table
+    Set<ServerName> tableServers = getRegionServersOfTable(regionLocator);
 
-    //iterate all cluster regions, filter regions from our table and compute their size
-    for (ServerName serverName: servers) {
-      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
+    for (ServerName tableServerName : tableServers) {
+      Map<byte[], RegionLoad> regionLoads =
+          admin.getRegionLoad(tableServerName, regionLocator.getName());
+      for (RegionLoad regionLoad : regionLoads.values()) {
 
-      for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
         byte[] regionId = regionLoad.getName();
+        long regionSizeBytes = regionLoad.getStorefileSizeMB() * MEGABYTE;
+        sizeMap.put(regionId, regionSizeBytes);
 
-        if (tableRegions.contains(regionId)) {
-
-          long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
-          sizeMap.put(regionId, regionSizeBytes);
-
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
-          }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
         }
       }
     }
     LOG.debug("Region sizes calculated");
   }
 
+  private Set<ServerName> getRegionServersOfTable(RegionLocator regionLocator)
+      throws IOException {
+
+    Set<ServerName> tableServers = Sets.newHashSet();
+    for (HRegionLocation regionLocation : regionLocator.getAllRegionLocations()) {
+      tableServers.add(regionLocation.getServerName());
+    }
+    return tableServers;
+  }
+
   boolean enabled(Configuration configuration) {
     return configuration.getBoolean(ENABLE_REGIONSIZECALCULATOR, true);
   }
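After this rewrite the calculator needs only a RegionLocator and an Admin, and talks only to the region servers that actually host the table; no ClusterStatus fetch from the master is involved. A hedged usage sketch (not part of the patch; "connection" and table "t1" are illustrative):

try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("t1"));
     Admin admin = connection.getAdmin()) {
  RegionSizeCalculator calculator = new RegionSizeCalculator(locator, admin);
  for (Map.Entry<byte[], Long> entry : calculator.getRegionSizeMap().entrySet()) {
    System.out.println(Bytes.toString(entry.getKey()) + " = " + entry.getValue() + " bytes");
  }
}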
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
new file mode 100644
index 0000000..ebd9f94
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
@@ -0,0 +1,135 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.*;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+@Category({MiscTests.class, MediumTests.class})
+public class TestRegionLoad {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static Admin admin;
+
+  private static final TableName TABLE_1 = TableName.valueOf("table_1");
+  private static final TableName TABLE_2 = TableName.valueOf("table_2");
+  private static final TableName TABLE_3 = TableName.valueOf("table_3");
+  private static final TableName[] tables = new TableName[]{TABLE_1, TABLE_2, TABLE_3};
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    UTIL.startMiniCluster(4);
+    admin = UTIL.getAdmin();
+    admin.setBalancerRunning(false, true);
+    createTables();
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    for (TableName table : tables) {
+      UTIL.deleteTableIfAny(table);
+    }
+    UTIL.shutdownMiniCluster();
+  }
+
+  private static void createTables() throws IOException, InterruptedException {
+    byte[] FAMILY = Bytes.toBytes("f");
+    for (TableName tableName : tables) {
+      Table table = UTIL.createMultiRegionTable(tableName, FAMILY, 16);
+      UTIL.waitTableAvailable(tableName);
+      UTIL.loadTable(table, FAMILY);
+    }
+  }
+
+  @Test
+  public void testRegionLoad() throws Exception {
+
+    // Check if regions match with the regionLoad from the server
+    for (ServerName serverName : admin.getClusterStatus().getServers()) {
+      List<HRegionInfo> regions = admin.getOnlineRegions(serverName);
+      Collection<RegionLoad> regionLoads = admin.getRegionLoad(serverName).values();
+      checkRegionsAndRegionLoads(regions, regionLoads);
+    }
+
+    // Check if regionLoad matches the table's regions and nothing is missed
+    for (TableName table : new TableName[]{TABLE_1, TABLE_2, TABLE_3}) {
+      List<HRegionInfo> tableRegions = admin.getTableRegions(table);
+
+      List<RegionLoad> regionLoads = Lists.newArrayList();
+      for (ServerName serverName : admin.getClusterStatus().getServers()) {
+        regionLoads.addAll(admin.getRegionLoad(serverName, table).values());
+      }
+      checkRegionsAndRegionLoads(tableRegions, regionLoads);
+    }
+
+    // Check RegionLoad matches the regionLoad from ClusterStatus
+    ClusterStatus clusterStatus = admin.getClusterStatus();
+    for (ServerName serverName : clusterStatus.getServers()) {
+      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
+      Map<byte[], RegionLoad> regionLoads = admin.getRegionLoad(serverName);
+      compareRegionLoads(serverLoad.getRegionsLoad(), regionLoads);
+    }
+  }
+
+  private void compareRegionLoads(Map<byte[], RegionLoad> regionLoadCluster,
+      Map<byte[], RegionLoad> regionLoads) {
+
+    assertEquals("No of regionLoads from clusterStatus and regionloads from RS doesn't match",
+        regionLoadCluster.size(), regionLoads.size());
+
+    // The contents of region load from cluster and server should match
+    for (byte[] regionName : regionLoadCluster.keySet()) {
+      regionLoads.remove(regionName);
+    }
+    assertEquals("regionLoads from SN should be empty", 0, regionLoads.size());
+  }
+
+  private void checkRegionsAndRegionLoads(Collection<HRegionInfo> regions,
+      Collection<RegionLoad> regionLoads) {
+
+    assertEquals("No of regions and regionloads doesn't match",
+        regions.size(), regionLoads.size());
+
+    Map<byte[], RegionLoad> regionLoadMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+    for (RegionLoad regionLoad : regionLoads) {
+      regionLoadMap.put(regionLoad.getName(), regionLoad);
+    }
+    for (HRegionInfo info : regions) {
+      assertTrue("Region not in regionLoadMap region:" + info.getRegionNameAsString() +
+          " regionMap: " + regionLoadMap, regionLoadMap.containsKey(info.getRegionName()));
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 69f2e35..6a066df 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -58,6 +58,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
@@ -450,6 +452,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public GetRegionLoadResponse getRegionLoad(RpcController controller,
+      GetRegionLoadRequest request) throws ServiceException {
+    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
+    return builder.build();
+  }
+
+  @Override
   public GetStoreFileResponse getStoreFile(RpcController controller,
       GetStoreFileRequest request) throws ServiceException {
     // TODO Auto-generated method stub
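The TestRegionSizeCalculator changes that follow drop the ClusterStatus/ServerLoad mocks in favour of stubbing Admin#getRegionLoad directly. In isolation the new stubbing pattern looks roughly like this (hedged sketch, not part of the patch; "serverName" and "tableName" stand in for the fixtures the real test builds, and the enclosing test method is assumed to declare throws Exception since getRegionLoad declares IOException):

Admin admin = Mockito.mock(Admin.class);
RegionLoad load = Mockito.mock(RegionLoad.class);
when(load.getName()).thenReturn("region1".getBytes());
when(load.getStorefileSizeMB()).thenReturn(123);
// Keyed by region name, ordered with the byte[] comparator, as in the patch.
Map<byte[], RegionLoad> loads = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
loads.put(load.getName(), load);
when(admin.getRegionLoad(serverName, tableName)).thenReturn(loads);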
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
index 8b74112..95d8b4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
@@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.util;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -39,8 +37,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 @Category({MiscTests.class, SmallTests.class})
@@ -48,6 +46,8 @@ public class TestRegionSizeCalculator {
 
   private Configuration configuration = new Configuration();
   private final long megabyte = 1024L * 1024L;
+  private final ServerName sn = ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT,
+      ServerName.NON_STARTCODE);
 
   @Test
   public void testSimpleTestCase() throws Exception {
@@ -55,14 +55,9 @@ public class TestRegionSizeCalculator {
     RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3");
 
     Admin admin = mockAdmin(
-        mockServer(
             mockRegion("region1", 123),
-            mockRegion("region3", 1232)
-        ),
-        mockServer(
-            mockRegion("region2", 54321),
-            mockRegion("otherTableRegion", 110)
-        )
+            mockRegion("region3", 1232),
+            mockRegion("region2", 54321)
     );
 
     RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);
@@ -70,7 +65,7 @@ public class TestRegionSizeCalculator {
     assertEquals(123 * megabyte, calculator.getRegionSize("region1".getBytes()));
     assertEquals(54321 * megabyte, calculator.getRegionSize("region2".getBytes()));
     assertEquals(1232 * megabyte, calculator.getRegionSize("region3".getBytes()));
-    // if region is not inside our table, it should return 0
+    // if regionCalculator does not know about a region, it should return 0
     assertEquals(0 * megabyte, calculator.getRegionSize("otherTableRegion".getBytes()));
 
     assertEquals(3, calculator.getRegionSizeMap().size());
@@ -87,9 +82,7 @@ public class TestRegionSizeCalculator {
     RegionLocator regionLocator = mockRegionLocator("largeRegion");
 
     Admin admin = mockAdmin(
-        mockServer(
             mockRegion("largeRegion", Integer.MAX_VALUE)
-        )
     );
 
     RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);
@@ -104,9 +97,7 @@ public class TestRegionSizeCalculator {
     RegionLocator table = mockRegionLocator(regionName);
 
     Admin admin = mockAdmin(
-        mockServer(
             mockRegion(regionName, 999)
-        )
     );
 
     //first request on enabled calculator
@@ -133,21 +124,23 @@ public class TestRegionSizeCalculator {
     for (String regionName : regionNames) {
       HRegionInfo info = Mockito.mock(HRegionInfo.class);
       when(info.getRegionName()).thenReturn(regionName.getBytes());
-      regionLocations.add(new HRegionLocation(info, null));//we are not interested in values
+      regionLocations.add(new HRegionLocation(info, sn));
     }
 
     return mockedTable;
   }
 
   /**
-   * Creates mock returning ClusterStatus info about given servers.
+   * Creates mock returning RegionLoad info about given servers.
    */
-  private Admin mockAdmin(ServerLoad... servers) throws Exception {
-    //get clusterstatus
+  private Admin mockAdmin(RegionLoad... regionLoadArray) throws Exception {
     Admin mockAdmin = Mockito.mock(Admin.class);
-    ClusterStatus clusterStatus = mockCluster(servers);
+    Map<byte[], RegionLoad> regionLoads = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
+    for (RegionLoad regionLoad : regionLoadArray) {
+      regionLoads.put(regionLoad.getName(), regionLoad);
+    }
     when(mockAdmin.getConfiguration()).thenReturn(configuration);
-    when(mockAdmin.getClusterStatus()).thenReturn(clusterStatus);
+    when(mockAdmin.getRegionLoad(sn, TableName.valueOf("sizeTestTable"))).thenReturn(regionLoads);
     return mockAdmin;
   }
 
@@ -163,35 +156,4 @@ public class TestRegionSizeCalculator {
     when(region.getStorefileSizeMB()).thenReturn(fileSizeMb);
     return region;
   }
-
-  private ClusterStatus mockCluster(ServerLoad[] servers) {
-    List<ServerName> serverNames = new ArrayList<ServerName>();
-
-    ClusterStatus clusterStatus = Mockito.mock(ClusterStatus.class);
-    when(clusterStatus.getServers()).thenReturn(serverNames);
-
-    int serverCounter = 0;
-    for (ServerLoad server : servers) {
-      ServerName serverName = mock(ServerName.class);
-      when(serverName.getServerName()).thenReturn("server" + (serverCounter++));
-      serverNames.add(serverName);
-      when(clusterStatus.getLoad(serverName)).thenReturn(server);
-    }
-
-    return clusterStatus;
-  }
-
-  /** Creates mock of region server with given regions*/
-  private ServerLoad mockServer(RegionLoad... regions) {
-    ServerLoad serverLoad = Mockito.mock(ServerLoad.class);
-    Map<byte[], RegionLoad> regionMap = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
-
-    for (RegionLoad regionName : regions) {
-      regionMap.put(regionName.getName(), regionName);
-    }
-
-    when(serverLoad.getRegionsLoad()).thenReturn(regionMap);
-    return serverLoad;
-  }
-
 }
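Taken together, the patch swaps one master round trip plus a cluster-wide filter for targeted region-server calls. A compressed before/after sketch (hedged summary; names as used throughout this diff):

// Before: one ClusterStatus fetch from the master, then filter every
// region in the cluster down to this table's regions.
ClusterStatus status = admin.getClusterStatus();
// ... iterate status.getServers() and serverLoad.getRegionsLoad() ...

// After: ask only the servers that host the table, and only for that table.
for (ServerName sn : tableServers) {
  Map<byte[], RegionLoad> loads = admin.getRegionLoad(sn, tableName);
  // ... sizeMap.put(load.getName(), load.getStorefileSizeMB() * MEGABYTE) ...
}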