diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index b04faac..94136a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1289,6 +1289,12 @@ public interface Admin extends Abortable, Closeable { */ CoprocessorRpcChannel coprocessorService(ServerName sn); + /** + * Update the configuration and trigger an online config change + * on the master. + * @throws IOException if a remote or network exception occurs + */ + void updateMasterConfiguration() throws IOException; /** * Update the configuration and trigger an online config change diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index bfdf5d2..d371b53 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -161,6 +161,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; @@ -1739,6 +1740,13 @@ class ConnectionManager { } @Override + public UpdateConfigurationResponse updateConfiguration(RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest + request) throws ServiceException { + return stub.updateConfiguration(controller, request); + } + + @Override public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 329a373..91728ad 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -3761,6 +3761,16 @@ public class HBaseAdmin implements Admin { } @Override + public void updateMasterConfiguration() throws IOException { + try { + this.connection.getMaster().updateConfiguration(null, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.getDefaultInstance()); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override public void updateConfiguration(ServerName server) throws IOException { try { this.connection.getAdmin(server).updateConfiguration(null, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index caeea87..1ea6666 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -43699,6 +43699,682 @@ public final class
MasterProtos { // @@protoc_insertion_point(class_scope:SetQuotaResponse) } + public interface UpdateConfigurationRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code UpdateConfigurationRequest} + */ + public static final class UpdateConfigurationRequest extends + com.google.protobuf.GeneratedMessage + implements UpdateConfigurationRequestOrBuilder { + // Use UpdateConfigurationRequest.newBuilder() to construct. + private UpdateConfigurationRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private UpdateConfigurationRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateConfigurationRequest defaultInstance; + public static UpdateConfigurationRequest getDefaultInstance() { + return defaultInstance; + } + + public UpdateConfigurationRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UpdateConfigurationRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateConfigurationRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateConfigurationRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return 
isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseDelimitedFrom( + java.io.InputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code UpdateConfigurationRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:UpdateConfigurationRequest) + } + + static { + defaultInstance = new UpdateConfigurationRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateConfigurationRequest) + } + + public interface UpdateConfigurationResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code UpdateConfigurationResponse} + */ + public static final class UpdateConfigurationResponse extends + com.google.protobuf.GeneratedMessage + implements UpdateConfigurationResponseOrBuilder { + // Use UpdateConfigurationResponse.newBuilder() to construct. 
+ private UpdateConfigurationResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private UpdateConfigurationResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateConfigurationResponse defaultInstance; + public static UpdateConfigurationResponse getDefaultInstance() { + return defaultInstance; + } + + public UpdateConfigurationResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UpdateConfigurationResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateConfigurationResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateConfigurationResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + 
memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code UpdateConfigurationResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_UpdateConfigurationResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse(this); + onBuilt(); + return 
result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:UpdateConfigurationResponse) + } + + static { + defaultInstance = new UpdateConfigurationResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateConfigurationResponse) + } + /** * Protobuf service {@code MasterService} */ @@ -44273,6 +44949,14 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc UpdateConfiguration(.UpdateConfigurationRequest) returns (.UpdateConfigurationResponse); + */ + public abstract void updateConfiguration( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -44638,6 +45322,14 @@ public final class MasterProtos { impl.setQuota(controller, request, done); } + @java.lang.Override + public void updateConfiguration( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest request, + com.google.protobuf.RpcCallback done) { + impl.updateConfiguration(controller, request, done); + } + }; } @@ -44750,6 +45442,8 @@ public final class MasterProtos { return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); case 44: return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); + case 45: + return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -44854,6 +45548,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + case 45: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -44958,6 +45654,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -45532,6 +46230,14 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc UpdateConfiguration(.UpdateConfigurationRequest) returns (.UpdateConfigurationResponse); + */ + public abstract void updateConfiguration( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -45779,6 +46485,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 45: + this.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -45883,6 +46594,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -45987,6 +46700,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -46682,6 +47397,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance())); } + + public void updateConfiguration( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(45), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ 
-46914,6 +47644,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse updateConfiguration( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -47462,6 +48197,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse updateConfiguration( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(45), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -47897,6 +48644,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SetQuotaResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateConfigurationRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateConfigurationRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateConfigurationResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateConfigurationResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -48023,88 +48780,92 @@ public final class MasterProtos { "amespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(\0132\n.Tab" + "leName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_glo" + "bals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.ThrottleR" + - "equest\"\022\n\020SetQuotaResponse2\346\030\n\rMasterSer" + - "vice\022S\n\024GetSchemaAlterStatus\022\034.GetSchema" + - "AlterStatusRequest\032\035.GetSchemaAlterStatu", - "sResponse\022P\n\023GetTableDescriptors\022\033.GetTa" + - "bleDescriptorsRequest\032\034.GetTableDescript" + - "orsResponse\022>\n\rGetTableNames\022\025.GetTableN" + - "amesRequest\032\026.GetTableNamesResponse\022G\n\020G" + - "etClusterStatus\022\030.GetClusterStatusReques" + - "t\032\031.GetClusterStatusResponse\022D\n\017IsMaster" + - "Running\022\027.IsMasterRunningRequest\032\030.IsMas" + - "terRunningResponse\0222\n\tAddColumn\022\021.AddCol" + - "umnRequest\032\022.AddColumnResponse\022;\n\014Delete" + - "Column\022\024.DeleteColumnRequest\032\025.DeleteCol", - "umnResponse\022;\n\014ModifyColumn\022\024.ModifyColu" + - "mnRequest\032\025.ModifyColumnResponse\0225\n\nMove" + - "Region\022\022.MoveRegionRequest\032\023.MoveRegionR" + - "esponse\022Y\n\026DispatchMergingRegions\022\036.Disp" + - "atchMergingRegionsRequest\032\037.DispatchMerg" + - 
"ingRegionsResponse\022;\n\014AssignRegion\022\024.Ass" + - "ignRegionRequest\032\025.AssignRegionResponse\022" + - "A\n\016UnassignRegion\022\026.UnassignRegionReques" + - "t\032\027.UnassignRegionResponse\022>\n\rOfflineReg" + - "ion\022\025.OfflineRegionRequest\032\026.OfflineRegi", - "onResponse\0228\n\013DeleteTable\022\023.DeleteTableR" + - "equest\032\024.DeleteTableResponse\022>\n\rtruncate" + - "Table\022\025.TruncateTableRequest\032\026.TruncateT" + - "ableResponse\0228\n\013EnableTable\022\023.EnableTabl" + - "eRequest\032\024.EnableTableResponse\022;\n\014Disabl" + - "eTable\022\024.DisableTableRequest\032\025.DisableTa" + - "bleResponse\0228\n\013ModifyTable\022\023.ModifyTable" + - "Request\032\024.ModifyTableResponse\0228\n\013CreateT" + - "able\022\023.CreateTableRequest\032\024.CreateTableR" + - "esponse\022/\n\010Shutdown\022\020.ShutdownRequest\032\021.", - "ShutdownResponse\0225\n\nStopMaster\022\022.StopMas" + - "terRequest\032\023.StopMasterResponse\022,\n\007Balan" + - "ce\022\017.BalanceRequest\032\020.BalanceResponse\022M\n" + - "\022SetBalancerRunning\022\032.SetBalancerRunning" + - "Request\032\033.SetBalancerRunningResponse\022A\n\016" + - "RunCatalogScan\022\026.RunCatalogScanRequest\032\027" + - ".RunCatalogScanResponse\022S\n\024EnableCatalog" + - "Janitor\022\034.EnableCatalogJanitorRequest\032\035." + - "EnableCatalogJanitorResponse\022\\\n\027IsCatalo" + - "gJanitorEnabled\022\037.IsCatalogJanitorEnable", - "dRequest\032 .IsCatalogJanitorEnabledRespon" + - "se\022L\n\021ExecMasterService\022\032.CoprocessorSer" + - "viceRequest\032\033.CoprocessorServiceResponse" + - "\022/\n\010Snapshot\022\020.SnapshotRequest\032\021.Snapsho" + - "tResponse\022V\n\025GetCompletedSnapshots\022\035.Get" + - "CompletedSnapshotsRequest\032\036.GetCompleted" + - "SnapshotsResponse\022A\n\016DeleteSnapshot\022\026.De" + - "leteSnapshotRequest\032\027.DeleteSnapshotResp" + - "onse\022A\n\016IsSnapshotDone\022\026.IsSnapshotDoneR" + - "equest\032\027.IsSnapshotDoneResponse\022D\n\017Resto", - "reSnapshot\022\027.RestoreSnapshotRequest\032\030.Re" + - "storeSnapshotResponse\022V\n\025IsRestoreSnapsh" + - "otDone\022\035.IsRestoreSnapshotDoneRequest\032\036." + - "IsRestoreSnapshotDoneResponse\022>\n\rExecPro" + - "cedure\022\025.ExecProcedureRequest\032\026.ExecProc" + - "edureResponse\022E\n\024ExecProcedureWithRet\022\025." + - "ExecProcedureRequest\032\026.ExecProcedureResp" + - "onse\022D\n\017IsProcedureDone\022\027.IsProcedureDon" + - "eRequest\032\030.IsProcedureDoneResponse\022D\n\017Mo" + - "difyNamespace\022\027.ModifyNamespaceRequest\032\030", - ".ModifyNamespaceResponse\022D\n\017CreateNamesp" + - "ace\022\027.CreateNamespaceRequest\032\030.CreateNam" + - "espaceResponse\022D\n\017DeleteNamespace\022\027.Dele" + - "teNamespaceRequest\032\030.DeleteNamespaceResp" + - "onse\022Y\n\026GetNamespaceDescriptor\022\036.GetName" + - "spaceDescriptorRequest\032\037.GetNamespaceDes" + - "criptorResponse\022_\n\030ListNamespaceDescript" + - "ors\022 .ListNamespaceDescriptorsRequest\032!." 
+ - "ListNamespaceDescriptorsResponse\022t\n\037List" + - "TableDescriptorsByNamespace\022\'.ListTableD", - "escriptorsByNamespaceRequest\032(.ListTable" + - "DescriptorsByNamespaceResponse\022b\n\031ListTa" + - "bleNamesByNamespace\022!.ListTableNamesByNa" + - "mespaceRequest\032\".ListTableNamesByNamespa" + - "ceResponse\022>\n\rGetTableState\022\025.GetTableSt" + - "ateRequest\032\026.GetTableStateResponse\022/\n\010Se" + - "tQuota\022\020.SetQuotaRequest\032\021.SetQuotaRespo" + - "nseBB\n*org.apache.hadoop.hbase.protobuf." + - "generatedB\014MasterProtosH\001\210\001\001\240\001\001" + "equest\"\022\n\020SetQuotaResponse\"\034\n\032UpdateConf" + + "igurationRequest\"\035\n\033UpdateConfigurationR" + + "esponse2\270\031\n\rMasterService\022S\n\024GetSchemaAl", + "terStatus\022\034.GetSchemaAlterStatusRequest\032" + + "\035.GetSchemaAlterStatusResponse\022P\n\023GetTab" + + "leDescriptors\022\033.GetTableDescriptorsReque" + + "st\032\034.GetTableDescriptorsResponse\022>\n\rGetT" + + "ableNames\022\025.GetTableNamesRequest\032\026.GetTa" + + "bleNamesResponse\022G\n\020GetClusterStatus\022\030.G" + + "etClusterStatusRequest\032\031.GetClusterStatu" + + "sResponse\022D\n\017IsMasterRunning\022\027.IsMasterR" + + "unningRequest\032\030.IsMasterRunningResponse\022" + + "2\n\tAddColumn\022\021.AddColumnRequest\032\022.AddCol", + "umnResponse\022;\n\014DeleteColumn\022\024.DeleteColu" + + "mnRequest\032\025.DeleteColumnResponse\022;\n\014Modi" + + "fyColumn\022\024.ModifyColumnRequest\032\025.ModifyC" + + "olumnResponse\0225\n\nMoveRegion\022\022.MoveRegion" + + "Request\032\023.MoveRegionResponse\022Y\n\026Dispatch" + + "MergingRegions\022\036.DispatchMergingRegionsR" + + "equest\032\037.DispatchMergingRegionsResponse\022" + + ";\n\014AssignRegion\022\024.AssignRegionRequest\032\025." 
+ + "AssignRegionResponse\022A\n\016UnassignRegion\022\026" + + ".UnassignRegionRequest\032\027.UnassignRegionR", + "esponse\022>\n\rOfflineRegion\022\025.OfflineRegion" + + "Request\032\026.OfflineRegionResponse\0228\n\013Delet" + + "eTable\022\023.DeleteTableRequest\032\024.DeleteTabl" + + "eResponse\022>\n\rtruncateTable\022\025.TruncateTab" + + "leRequest\032\026.TruncateTableResponse\0228\n\013Ena" + + "bleTable\022\023.EnableTableRequest\032\024.EnableTa" + + "bleResponse\022;\n\014DisableTable\022\024.DisableTab" + + "leRequest\032\025.DisableTableResponse\0228\n\013Modi" + + "fyTable\022\023.ModifyTableRequest\032\024.ModifyTab" + + "leResponse\0228\n\013CreateTable\022\023.CreateTableR", + "equest\032\024.CreateTableResponse\022/\n\010Shutdown" + + "\022\020.ShutdownRequest\032\021.ShutdownResponse\0225\n" + + "\nStopMaster\022\022.StopMasterRequest\032\023.StopMa" + + "sterResponse\022,\n\007Balance\022\017.BalanceRequest" + + "\032\020.BalanceResponse\022M\n\022SetBalancerRunning" + + "\022\032.SetBalancerRunningRequest\032\033.SetBalanc" + + "erRunningResponse\022A\n\016RunCatalogScan\022\026.Ru" + + "nCatalogScanRequest\032\027.RunCatalogScanResp" + + "onse\022S\n\024EnableCatalogJanitor\022\034.EnableCat" + + "alogJanitorRequest\032\035.EnableCatalogJanito", + "rResponse\022\\\n\027IsCatalogJanitorEnabled\022\037.I" + + "sCatalogJanitorEnabledRequest\032 .IsCatalo" + + "gJanitorEnabledResponse\022L\n\021ExecMasterSer" + + "vice\022\032.CoprocessorServiceRequest\032\033.Copro" + + "cessorServiceResponse\022/\n\010Snapshot\022\020.Snap" + + "shotRequest\032\021.SnapshotResponse\022V\n\025GetCom" + + "pletedSnapshots\022\035.GetCompletedSnapshotsR" + + "equest\032\036.GetCompletedSnapshotsResponse\022A" + + "\n\016DeleteSnapshot\022\026.DeleteSnapshotRequest" + + "\032\027.DeleteSnapshotResponse\022A\n\016IsSnapshotD", + "one\022\026.IsSnapshotDoneRequest\032\027.IsSnapshot" + + "DoneResponse\022D\n\017RestoreSnapshot\022\027.Restor" + + "eSnapshotRequest\032\030.RestoreSnapshotRespon" + + "se\022V\n\025IsRestoreSnapshotDone\022\035.IsRestoreS" + + "napshotDoneRequest\032\036.IsRestoreSnapshotDo" + + "neResponse\022>\n\rExecProcedure\022\025.ExecProced" + + "ureRequest\032\026.ExecProcedureResponse\022E\n\024Ex" + + "ecProcedureWithRet\022\025.ExecProcedureReques" + + "t\032\026.ExecProcedureResponse\022D\n\017IsProcedure" + + "Done\022\027.IsProcedureDoneRequest\032\030.IsProced", + "ureDoneResponse\022D\n\017ModifyNamespace\022\027.Mod" + + "ifyNamespaceRequest\032\030.ModifyNamespaceRes" + + "ponse\022D\n\017CreateNamespace\022\027.CreateNamespa" + + "ceRequest\032\030.CreateNamespaceResponse\022D\n\017D" + + "eleteNamespace\022\027.DeleteNamespaceRequest\032" + + "\030.DeleteNamespaceResponse\022Y\n\026GetNamespac" + + "eDescriptor\022\036.GetNamespaceDescriptorRequ" + + "est\032\037.GetNamespaceDescriptorResponse\022_\n\030" + + "ListNamespaceDescriptors\022 .ListNamespace" + + "DescriptorsRequest\032!.ListNamespaceDescri", + "ptorsResponse\022t\n\037ListTableDescriptorsByN" + + "amespace\022\'.ListTableDescriptorsByNamespa" + + "ceRequest\032(.ListTableDescriptorsByNamesp" + + "aceResponse\022b\n\031ListTableNamesByNamespace" + + "\022!.ListTableNamesByNamespaceRequest\032\".Li" + + "stTableNamesByNamespaceResponse\022>\n\rGetTa" + + "bleState\022\025.GetTableStateRequest\032\026.GetTab" + + "leStateResponse\022/\n\010SetQuota\022\020.SetQuotaRe" + + "quest\032\021.SetQuotaResponse\022P\n\023UpdateConfig" + + "uration\022\033.UpdateConfigurationRequest\032\034.U", + 
"pdateConfigurationResponseBB\n*org.apache" + + ".hadoop.hbase.protobuf.generatedB\014Master" + + "ProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -48627,6 +49388,18 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetQuotaResponse_descriptor, new java.lang.String[] { }); + internal_static_UpdateConfigurationRequest_descriptor = + getDescriptor().getMessageTypes().get(86); + internal_static_UpdateConfigurationRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateConfigurationRequest_descriptor, + new java.lang.String[] { }); + internal_static_UpdateConfigurationResponse_descriptor = + getDescriptor().getMessageTypes().get(87); + internal_static_UpdateConfigurationResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateConfigurationResponse_descriptor, + new java.lang.String[] { }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 5d60e55..f454904 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -382,6 +382,12 @@ message SetQuotaRequest { message SetQuotaResponse { } +message UpdateConfigurationRequest { +} + +message UpdateConfigurationResponse { +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -596,4 +602,7 @@ service MasterService { /** Apply the new quota settings */ rpc SetQuota(SetQuotaRequest) returns(SetQuotaResponse); + + rpc UpdateConfiguration(UpdateConfigurationRequest) + returns(UpdateConfigurationResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 3437f34..721e3c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1960,4 +1960,16 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } return tableNames; } + + /** + * Reload the configuration from disk. + */ + public void updateConfiguration() { + LOG.info("Reloading the configuration from disk."); + // Reload the configuration from disk. 
+ conf.reloadConfiguration(); + synchronized (this.balancer) { + balancer.setConf(conf); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 2efcf63..702863b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -144,6 +144,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationResponse; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -264,6 +265,14 @@ public class MasterRpcServices extends RSRpcServices bssi.addAll(super.getServices()); return bssi; } + + @Override + public UpdateConfigurationResponse updateConfiguration(RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UpdateConfigurationRequest + request) throws ServiceException { + master.updateConfiguration(); + return UpdateConfigurationResponse.getDefaultInstance(); + } @Override public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index daeca1c..2d7d444 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -32,6 +32,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -136,15 +138,19 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember); - localityCandidateGenerator = new LocalityBasedCandidateGenerator(services); + if (localityCandidateGenerator == null) { + localityCandidateGenerator = new LocalityBasedCandidateGenerator(services); + } localityCost = new LocalityCostFunction(conf, services); - candidateGenerators = new CandidateGenerator[] { - new RandomCandidateGenerator(), - new LoadCandidateGenerator(), - localityCandidateGenerator, - new RegionReplicaRackCandidateGenerator(), - }; + if (candidateGenerators == null) { + candidateGenerators = new CandidateGenerator[] { + new RandomCandidateGenerator(), + new LoadCandidateGenerator(), + localityCandidateGenerator, 
+ new RegionReplicaRackCandidateGenerator(), + }; + } regionLoadFunctions = new CostFromRegionLoadFunction[] { new ReadRequestCostFunction(conf), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java index 178bc53..f865db6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java @@ -46,4 +46,11 @@ public class TestUpdateConfiguration { ServerName server = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); admin.updateConfiguration(server); } + + @Test + public void testMasterOnlineConfigChange() throws IOException { + LOG.debug("Starting testMasterOnlineConfigChange"); + Admin admin = TEST_UTIL.getHBaseAdmin(); + admin.updateMasterConfiguration(); // throws on RPC failure, so this smoke test needs no assertion + } }
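
Reviewer note: a minimal sketch of how a client would exercise the new Admin call once this patch is applied. Only updateMasterConfiguration() comes from the patch; the Connection/ConnectionFactory boilerplate is standard client wiring assumed to be available on this branch, not part of the change.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ReloadMasterConfig {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the active master to re-read its configuration from disk and
          // re-apply balancer settings online; no master restart is needed.
          admin.updateMasterConfiguration();
        }
      }
    }

Note that the RPC carries no payload in either direction (UpdateConfigurationRequest and UpdateConfigurationResponse are both empty messages), so the edited configuration files must already be on the master's local disk before the call is made.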
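
The StochasticLoadBalancer hunk also imports ConfigurationManager and PropagatingConfigurationObserver from org.apache.hadoop.hbase.conf, and its new null guards keep setConf() idempotent so the balancer can be re-configured on reload. For context, a sketch of the observer contract that package defines; the property key is one the balancer already reads, but the registration wiring shown in comments is an assumption, since those hunks are not in this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.conf.ConfigurationObserver;

    class TunableComponent implements ConfigurationObserver {
      private volatile int maxSteps = 1000;

      // Invoked when the server-side framework propagates a reloaded
      // Configuration; re-read only the keys this component cares about.
      @Override
      public void onConfigurationChange(Configuration newConf) {
        maxSteps = newConf.getInt("hbase.master.balancer.stochastic.maxSteps", 1000);
      }
    }

    // Assumed wiring: the owning service registers the observer with a
    // ConfigurationManager and, on reload, calls notifyAllObservers(newConf)
    // to fan the change out to every registered component.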