diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 51a26bc..fe454b3 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -942,6 +942,14 @@ public interface Admin extends Abortable, Closeable {
   void stopMaster() throws IOException;
 
   /**
+   * Check whether Master is in maintenance mode
+   *
+   * @return true if master is in maintenance mode
+   * @throws IOException if a remote or network exception occurs
+   */
+  boolean isMasterInMaintenanceMode() throws IOException;
+
+  /**
    * Stop the designated regionserver
    *
    * @param hostnamePort Hostname and port delimited by a : as in
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index d93a8b4..a11aaba 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1416,6 +1416,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       }
 
       @Override
+      public MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+          final RpcController controller,
+          final MasterProtos.IsInMaintenanceModeRequest request) throws ServiceException {
+        return stub.isMasterInMaintenanceMode(controller, request);
+      }
+
+      @Override
       public MasterProtos.BalanceResponse balance(RpcController controller,
           MasterProtos.BalanceRequest request) throws ServiceException {
         return stub.balance(controller, request);
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index e21a5d2..a6a3535 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -129,6 +129,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterSta
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
@@ -1841,6 +1843,19 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
+  public boolean isMasterInMaintenanceMode() throws IOException {
+    return executeCallable(new MasterCallable<IsInMaintenanceModeResponse>(getConnection()) {
+      @Override
+      public IsInMaintenanceModeResponse call(int callTimeout) throws ServiceException {
+        PayloadCarryingRpcController controller = rpcControllerFactory.newController();
+        controller.setCallTimeout(callTimeout);
+        return master.isMasterInMaintenanceMode(
+          controller, IsInMaintenanceModeRequest.newBuilder().build());
+      }
+    }).getInMaintenanceMode();
+  }
+
+  @Override
   public ClusterStatus getClusterStatus() throws IOException {
     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
       @Override
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index ff3d1c7..ec79167 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -125,6 +125,10 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   public String recoveringRegionsZNode;
   // znode containing namespace descriptors
   public static String namespaceZNode = "namespace";
+  // znode indicating master maintenance mode
+  public static String masterMaintZNode = "masterMaintenance";
+  // znode for running hbck
+  public static String hbckZNode;
 
   public final static String META_ZNODE_PREFIX = "meta-region-server";
 
@@ -196,6 +200,7 @@
       ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode);
       ZKUtil.createAndFailSilent(this, tableLockZNode);
       ZKUtil.createAndFailSilent(this, recoveringRegionsZNode);
+      ZKUtil.createAndFailSilent(this, masterMaintZNode);
     } catch (KeeperException e) {
       throw new ZooKeeperConnectionException(
         prefix("Unexpected KeeperException creating base node"), e);
@@ -445,6 +450,9 @@
         conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
     namespaceZNode = ZKUtil.joinZNode(baseZNode,
         conf.get("zookeeper.znode.namespace", "namespace"));
+    masterMaintZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.masterMaintenance", "master-maintenance"));
+    hbckZNode = ZKUtil.joinZNode(masterMaintZNode, conf.get("zookeeper.znode.hbck", "hbck"));
   }
 
   /**
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index b4bd7af..3ec9953 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -26224,38 +26224,28 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.StopMasterResponse)
   }
 
-  public interface BalanceRequestOrBuilder
+  public interface IsInMaintenanceModeRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
-
-    // optional bool force = 1;
-    /**
-     * optional bool force = 1;
-     */
-    boolean hasForce();
-    /**
-     * optional bool force = 1;
-     */
-    boolean getForce();
   }
   /**
-   * Protobuf type {@code hbase.pb.BalanceRequest}
+   * Protobuf type {@code hbase.pb.IsInMaintenanceModeRequest}
    */
-  public static final class BalanceRequest extends
+  public static final class IsInMaintenanceModeRequest extends
       com.google.protobuf.GeneratedMessage
-      implements BalanceRequestOrBuilder {
-    // Use BalanceRequest.newBuilder() to construct.
-    private BalanceRequest(com.google.protobuf.GeneratedMessage.Builder builder) {
+      implements IsInMaintenanceModeRequestOrBuilder {
+    // Use IsInMaintenanceModeRequest.newBuilder() to construct.
+ private IsInMaintenanceModeRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BalanceRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsInMaintenanceModeRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BalanceRequest defaultInstance; - public static BalanceRequest getDefaultInstance() { + private static final IsInMaintenanceModeRequest defaultInstance; + public static IsInMaintenanceModeRequest getDefaultInstance() { return defaultInstance; } - public BalanceRequest getDefaultInstanceForType() { + public IsInMaintenanceModeRequest getDefaultInstanceForType() { return defaultInstance; } @@ -26265,12 +26255,11 @@ public final class MasterProtos { getUnknownFields() { return this.unknownFields; } - private BalanceRequest( + private IsInMaintenanceModeRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -26288,11 +26277,6 @@ public final class MasterProtos { } break; } - case 8: { - bitField0_ |= 0x00000001; - force_ = input.readBool(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -26307,50 +26291,32 @@ public final class MasterProtos { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BalanceRequest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsInMaintenanceModeRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BalanceRequest(input, extensionRegistry); + return new IsInMaintenanceModeRequest(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // optional bool force = 1; - public static final int FORCE_FIELD_NUMBER = 1; - private 
boolean force_; - /** - * optional bool force = 1; - */ - public boolean hasForce() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool force = 1; - */ - public boolean getForce() { - return force_; - } - private void initFields() { - force_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -26364,9 +26330,6 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, force_); - } getUnknownFields().writeTo(output); } @@ -26376,10 +26339,6 @@ public final class MasterProtos { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, force_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -26397,17 +26356,12 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) obj; boolean result = true; - result = result && (hasForce() == other.hasForce()); - if (hasForce()) { - result = result && (getForce() - == other.getForce()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -26421,62 +26375,58 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasForce()) { - hash = (37 * hash) + FORCE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getForce()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -26485,7 +26435,7 @@ public final class MasterProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -26497,24 +26447,24 @@ public final class MasterProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BalanceRequest} + * Protobuf type {@code hbase.pb.IsInMaintenanceModeRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -26534,8 +26484,6 @@ public final class MasterProtos { public Builder clear() { super.clear(); - force_ = false; - bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -26545,48 +26493,38 @@ public final class MasterProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.force_ = force_; - result.bitField0_ = to_bitField0_; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance()) return this; - if (other.hasForce()) { - setForce(other.getForce()); - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -26599,11 +26537,11 @@ public final class MasterProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -26612,84 +26550,50 @@ public final class MasterProtos { } return this; } - private int bitField0_; - // optional bool force = 1; - private boolean force_ ; - /** - * optional bool force = 1; - */ - public boolean hasForce() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool force = 1; - */ - public boolean getForce() { - return force_; - } - /** - * optional bool force = 1; - */ - public Builder setForce(boolean value) { - bitField0_ |= 0x00000001; - force_ = value; - onChanged(); - return this; - } - /** - * optional bool force = 1; - */ - public Builder clearForce() { - bitField0_ = (bitField0_ & ~0x00000001); - force_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsInMaintenanceModeRequest) } static { - defaultInstance = new BalanceRequest(true); + defaultInstance = new IsInMaintenanceModeRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BalanceRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.IsInMaintenanceModeRequest) } - public interface BalanceResponseOrBuilder + public interface IsInMaintenanceModeResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bool balancer_ran = 1; + // required bool inMaintenanceMode = 1; /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - boolean hasBalancerRan(); + boolean hasInMaintenanceMode(); /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - boolean getBalancerRan(); + boolean getInMaintenanceMode(); } 
/** - * Protobuf type {@code hbase.pb.BalanceResponse} + * Protobuf type {@code hbase.pb.IsInMaintenanceModeResponse} */ - public static final class BalanceResponse extends + public static final class IsInMaintenanceModeResponse extends com.google.protobuf.GeneratedMessage - implements BalanceResponseOrBuilder { - // Use BalanceResponse.newBuilder() to construct. - private BalanceResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements IsInMaintenanceModeResponseOrBuilder { + // Use IsInMaintenanceModeResponse.newBuilder() to construct. + private IsInMaintenanceModeResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BalanceResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private IsInMaintenanceModeResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BalanceResponse defaultInstance; - public static BalanceResponse getDefaultInstance() { + private static final IsInMaintenanceModeResponse defaultInstance; + public static IsInMaintenanceModeResponse getDefaultInstance() { return defaultInstance; } - public BalanceResponse getDefaultInstanceForType() { + public IsInMaintenanceModeResponse getDefaultInstanceForType() { return defaultInstance; } @@ -26699,7 +26603,7 @@ public final class MasterProtos { getUnknownFields() { return this.unknownFields; } - private BalanceResponse( + private IsInMaintenanceModeResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -26724,7 +26628,7 @@ public final class MasterProtos { } case 8: { bitField0_ |= 0x00000001; - balancerRan_ = input.readBool(); + inMaintenanceMode_ = input.readBool(); break; } } @@ -26741,57 +26645,57 @@ public final class MasterProtos { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BalanceResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsInMaintenanceModeResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new 
BalanceResponse(input, extensionRegistry); + return new IsInMaintenanceModeResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required bool balancer_ran = 1; - public static final int BALANCER_RAN_FIELD_NUMBER = 1; - private boolean balancerRan_; + // required bool inMaintenanceMode = 1; + public static final int INMAINTENANCEMODE_FIELD_NUMBER = 1; + private boolean inMaintenanceMode_; /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - public boolean hasBalancerRan() { + public boolean hasInMaintenanceMode() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - public boolean getBalancerRan() { - return balancerRan_; + public boolean getInMaintenanceMode() { + return inMaintenanceMode_; } private void initFields() { - balancerRan_ = false; + inMaintenanceMode_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasBalancerRan()) { + if (!hasInMaintenanceMode()) { memoizedIsInitialized = 0; return false; } @@ -26803,7 +26707,7 @@ public final class MasterProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, balancerRan_); + output.writeBool(1, inMaintenanceMode_); } getUnknownFields().writeTo(output); } @@ -26816,7 +26720,7 @@ public final class MasterProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, balancerRan_); + .computeBoolSize(1, inMaintenanceMode_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -26835,16 +26739,16 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) obj; boolean result = true; - result = result && (hasBalancerRan() == other.hasBalancerRan()); - if (hasBalancerRan()) { - result = result && (getBalancerRan() - == other.getBalancerRan()); + result = result && (hasInMaintenanceMode() == other.hasInMaintenanceMode()); + if (hasInMaintenanceMode()) { + result = result && (getInMaintenanceMode() + == other.getInMaintenanceMode()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -26859,62 +26763,62 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBalancerRan()) { - hash = (37 * hash) + BALANCER_RAN_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getBalancerRan()); + if (hasInMaintenanceMode()) { + hash = (37 * hash) + INMAINTENANCEMODE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getInMaintenanceMode()); } hash = (29 * hash) + getUnknownFields().hashCode(); 
memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( com.google.protobuf.CodedInputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -26923,7 +26827,7 @@ public final class MasterProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -26935,24 +26839,24 @@ public final class MasterProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BalanceResponse} + * Protobuf type {@code hbase.pb.IsInMaintenanceModeResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -26972,7 +26876,7 @@ public final class MasterProtos { public Builder clear() { super.clear(); - balancerRan_ = false; + inMaintenanceMode_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -26983,54 +26887,54 @@ public final class MasterProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); } - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.balancerRan_ = balancerRan_; + result.inMaintenanceMode_ = inMaintenanceMode_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()) return this; - if (other.hasBalancerRan()) { - setBalancerRan(other.getBalancerRan()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()) return this; + if (other.hasInMaintenanceMode()) { + setInMaintenanceMode(other.getInMaintenanceMode()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasBalancerRan()) { + if (!hasInMaintenanceMode()) { return false; } @@ -27041,11 +26945,11 @@ public final class MasterProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ 
-27056,92 +26960,82 @@ public final class MasterProtos { } private int bitField0_; - // required bool balancer_ran = 1; - private boolean balancerRan_ ; + // required bool inMaintenanceMode = 1; + private boolean inMaintenanceMode_ ; /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - public boolean hasBalancerRan() { + public boolean hasInMaintenanceMode() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - public boolean getBalancerRan() { - return balancerRan_; + public boolean getInMaintenanceMode() { + return inMaintenanceMode_; } /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - public Builder setBalancerRan(boolean value) { + public Builder setInMaintenanceMode(boolean value) { bitField0_ |= 0x00000001; - balancerRan_ = value; + inMaintenanceMode_ = value; onChanged(); return this; } /** - * required bool balancer_ran = 1; + * required bool inMaintenanceMode = 1; */ - public Builder clearBalancerRan() { + public Builder clearInMaintenanceMode() { bitField0_ = (bitField0_ & ~0x00000001); - balancerRan_ = false; + inMaintenanceMode_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.IsInMaintenanceModeResponse) } static { - defaultInstance = new BalanceResponse(true); + defaultInstance = new IsInMaintenanceModeResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BalanceResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.IsInMaintenanceModeResponse) } - public interface SetBalancerRunningRequestOrBuilder + public interface BalanceRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bool on = 1; - /** - * required bool on = 1; - */ - boolean hasOn(); - /** - * required bool on = 1; - */ - boolean getOn(); - - // optional bool synchronous = 2; + // optional bool force = 1; /** - * optional bool synchronous = 2; + * optional bool force = 1; */ - boolean hasSynchronous(); + boolean hasForce(); /** - * optional bool synchronous = 2; + * optional bool force = 1; */ - boolean getSynchronous(); + boolean getForce(); } /** - * Protobuf type {@code hbase.pb.SetBalancerRunningRequest} + * Protobuf type {@code hbase.pb.BalanceRequest} */ - public static final class SetBalancerRunningRequest extends + public static final class BalanceRequest extends com.google.protobuf.GeneratedMessage - implements SetBalancerRunningRequestOrBuilder { - // Use SetBalancerRunningRequest.newBuilder() to construct. - private SetBalancerRunningRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements BalanceRequestOrBuilder { + // Use BalanceRequest.newBuilder() to construct. 
+ private BalanceRequest(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SetBalancerRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private BalanceRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SetBalancerRunningRequest defaultInstance; - public static SetBalancerRunningRequest getDefaultInstance() { + private static final BalanceRequest defaultInstance; + public static BalanceRequest getDefaultInstance() { return defaultInstance; } - public SetBalancerRunningRequest getDefaultInstanceForType() { + public BalanceRequest getDefaultInstanceForType() { return defaultInstance; } @@ -27151,7 +27045,893 @@ public final class MasterProtos { getUnknownFields() { return this.unknownFields; } - private SetBalancerRunningRequest( + private BalanceRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + force_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional bool force = 1; + public static final int FORCE_FIELD_NUMBER = 1; + private boolean force_; + /** + * optional bool force = 1; + */ + public boolean hasForce() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bool force = 1; + */ + public boolean getForce() { + return force_; + } + + private void initFields() { + force_ = false; + } + private byte 
memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, force_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, force_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) obj; + + boolean result = true; + result = result && (hasForce() == other.hasForce()); + if (hasForce()) { + result = result && (getForce() + == other.getForce()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasForce()) { + hash = (37 * hash) + FORCE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getForce()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BalanceRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + force_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.force_ = force_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance()) return this; + if (other.hasForce()) { + setForce(other.getForce()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional bool force = 1; + private boolean force_ ; + /** + * optional bool force = 1; + */ + public boolean hasForce() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bool force = 1; + */ + public boolean getForce() { + return force_; + } + /** + * optional bool force = 1; + */ + public Builder setForce(boolean value) { + bitField0_ |= 0x00000001; + force_ = value; + onChanged(); + return this; + } + /** + * optional bool force = 1; + */ + public Builder clearForce() { + bitField0_ = (bitField0_ & ~0x00000001); + force_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceRequest) + } + + static { + defaultInstance = new BalanceRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BalanceRequest) + } + + public interface BalanceResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool balancer_ran = 1; + /** + * required bool balancer_ran = 1; + */ + boolean hasBalancerRan(); + /** + * required bool balancer_ran = 1; + */ + boolean 
getBalancerRan(); + } + /** + * Protobuf type {@code hbase.pb.BalanceResponse} + */ + public static final class BalanceResponse extends + com.google.protobuf.GeneratedMessage + implements BalanceResponseOrBuilder { + // Use BalanceResponse.newBuilder() to construct. + private BalanceResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceResponse defaultInstance; + public static BalanceResponse getDefaultInstance() { + return defaultInstance; + } + + public BalanceResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + balancerRan_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool balancer_ran = 1; + public static final int BALANCER_RAN_FIELD_NUMBER = 1; + private boolean balancerRan_; + /** + * required bool balancer_ran = 1; + */ + public boolean hasBalancerRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balancer_ran = 1; + */ + public boolean getBalancerRan() { + return balancerRan_; 
+ } + + private void initFields() { + balancerRan_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBalancerRan()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, balancerRan_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, balancerRan_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) obj; + + boolean result = true; + result = result && (hasBalancerRan() == other.hasBalancerRan()); + if (hasBalancerRan()) { + result = result && (getBalancerRan() + == other.getBalancerRan()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBalancerRan()) { + hash = (37 * hash) + BALANCER_RAN_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBalancerRan()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BalanceResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + balancerRan_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.balancerRan_ = balancerRan_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()) return this; + if (other.hasBalancerRan()) { + setBalancerRan(other.getBalancerRan()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBalancerRan()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool balancer_ran = 1; + private boolean balancerRan_ ; + /** + * required bool balancer_ran = 1; + */ + public boolean hasBalancerRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balancer_ran = 1; + */ + public boolean getBalancerRan() { + return balancerRan_; + } + /** + * required bool balancer_ran = 1; + */ + public Builder setBalancerRan(boolean value) { + bitField0_ |= 0x00000001; + balancerRan_ = value; + onChanged(); + return this; + } + /** + * required bool balancer_ran = 1; + */ + public Builder clearBalancerRan() { + bitField0_ = (bitField0_ & ~0x00000001); + balancerRan_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceResponse) + } + + static { + 
      defaultInstance = new BalanceResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.BalanceResponse)
+  }
+
+  public interface SetBalancerRunningRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool on = 1;
+    /**
+     * required bool on = 1;
+     */
+    boolean hasOn();
+    /**
+     * required bool on = 1;
+     */
+    boolean getOn();
+
+    // optional bool synchronous = 2;
+    /**
+     * optional bool synchronous = 2;
+     */
+    boolean hasSynchronous();
+    /**
+     * optional bool synchronous = 2;
+     */
+    boolean getSynchronous();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SetBalancerRunningRequest}
+   */
+  public static final class SetBalancerRunningRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements SetBalancerRunningRequestOrBuilder {
+    // Use SetBalancerRunningRequest.newBuilder() to construct.
+    private SetBalancerRunningRequest(com.google.protobuf.GeneratedMessage.Builder builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SetBalancerRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SetBalancerRunningRequest defaultInstance;
+    public static SetBalancerRunningRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SetBalancerRunningRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SetBalancerRunningRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
@@ -60534,6 +61314,19 @@ public final class MasterProtos {
           com.google.protobuf.RpcCallback done);
 
       /**
+       * rpc IsMasterInMaintenanceMode(.hbase.pb.IsInMaintenanceModeRequest) returns (.hbase.pb.IsInMaintenanceModeResponse);
+       *
+       * 
+       **
+       * Query whether the Master is in maintenance mode.
+       * 
+       */
+      public abstract void isMasterInMaintenanceMode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+          com.google.protobuf.RpcCallback done);
+
+      /**
        * rpc Balance(.hbase.pb.BalanceRequest) returns (.hbase.pb.BalanceResponse);
        *
        * 
@@ -61160,6 +61953,14 @@ public final class MasterProtos {
         }
 
         @java.lang.Override
+        public  void isMasterInMaintenanceMode(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+            com.google.protobuf.RpcCallback done) {
+          impl.isMasterInMaintenanceMode(controller, request, done);
+        }
+
+        @java.lang.Override
         public  void balance(
             com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request,
@@ -61512,76 +62313,78 @@ public final class MasterProtos {
             case 20:
               return impl.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)request);
             case 21:
-              return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request);
+              return impl.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request);
             case 22:
-              return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request);
+              return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request);
             case 23:
-              return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request);
+              return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request);
             case 24:
-              return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request);
+              return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request);
             case 25:
-              return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request);
+              return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request);
             case 26:
-              return impl.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request);
+              return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request);
             case 27:
-              return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request);
+              return impl.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request);
             case 28:
-              return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request);
+              return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request);
             case 29:
-              return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request);
+              return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request);
             case 30:
-              return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request);
+              return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request);
             case 31:
-              return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request);
+              return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request);
             case 32:
-              return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request);
+              return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request);
             case 33:
-              return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
+              return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request);
             case 34:
-              return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request);
+              return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
             case 35:
-              return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request);
+              return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request);
             case 36:
-              return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request);
+              return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request);
             case 37:
-              return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request);
+              return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request);
             case 38:
-              return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
+              return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request);
             case 39:
-              return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
+              return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
             case 40:
-              return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
+              return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
             case 41:
-              return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request);
+              return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
             case 42:
-              return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request);
+              return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request);
             case 43:
-              return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request);
+              return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request);
             case 44:
-              return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request);
+              return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request);
             case 45:
-              return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request);
+              return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request);
             case 46:
-              return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request);
+              return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request);
             case 47:
-              return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
+              return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request);
             case 48:
-              return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
+              return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
             case 49:
-              return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
+              return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
             case 50:
-              return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
+              return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
             case 51:
-              return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
+              return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
             case 52:
-              return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
+              return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
             case 53:
-              return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
+              return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
             case 54:
-              return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request);
+              return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
             case 55:
-              return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
+              return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request);
             case 56:
+              return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
+            case 57:
               return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -61640,76 +62443,78 @@ public final class MasterProtos {
             case 20:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance();
             case 21:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance();
             case 22:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
             case 23:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
             case 24:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
             case 25:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
             case 26:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
             case 27:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
             case 28:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
             case 29:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
             case 30:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
             case 31:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
             case 32:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
             case 33:
-              return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
             case 34:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
             case 35:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
             case 36:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
             case 37:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
             case 38:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
             case 39:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
             case 40:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
             case 41:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
             case 42:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
             case 43:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
             case 44:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
             case 45:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
             case 46:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
             case 47:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
             case 48:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
             case 49:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
             case 50:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
             case 51:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
             case 52:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
             case 53:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
             case 54:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
             case 55:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
             case 56:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+            case 57:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -61768,76 +62573,78 @@ public final class MasterProtos {
             case 20:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance();
             case 21:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance();
             case 22:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
             case 23:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
             case 24:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
             case 25:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
             case 26:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
             case 27:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
             case 28:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
             case 29:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
             case 30:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
             case 31:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
             case 32:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
             case 33:
-              return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
             case 34:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
             case 35:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
             case 36:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
             case 37:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
             case 38:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
             case 39:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
             case 40:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
             case 41:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
             case 42:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
             case 43:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
             case 44:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
             case 45:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
             case 46:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
             case 47:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
             case 48:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
             case 49:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
             case 50:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
             case 51:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
             case 52:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
             case 53:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
             case 54:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
             case 55:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
             case 56:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+            case 57:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -62108,6 +62915,19 @@ public final class MasterProtos {
         com.google.protobuf.RpcCallback done);
 
     /**
+     * rpc IsMasterInMaintenanceMode(.hbase.pb.IsInMaintenanceModeRequest) returns (.hbase.pb.IsInMaintenanceModeResponse);
+     *
+     * 
+     **
+     * Query whether the Master is in maintenance mode.
+     * 
+     */
+    public abstract void isMasterInMaintenanceMode(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+        com.google.protobuf.RpcCallback done);
+
+    /**
      * rpc Balance(.hbase.pb.BalanceRequest) returns (.hbase.pb.BalanceResponse);
      *
      * 
@@ -62688,181 +63508,186 @@ public final class MasterProtos {
               done));
           return;
         case 21:
+          this.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request,
+            com.google.protobuf.RpcUtil.specializeCallback(
+              done));
+          return;
+        case 22:
           this.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 22:
+        case 23:
           this.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 23:
+        case 24:
           this.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 24:
+        case 25:
           this.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 25:
+        case 26:
           this.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 26:
+        case 27:
           this.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 27:
+        case 28:
           this.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 28:
+        case 29:
           this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 29:
+        case 30:
           this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 30:
+        case 31:
           this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 31:
+        case 32:
           this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 32:
+        case 33:
           this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 33:
+        case 34:
           this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 34:
+        case 35:
           this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 35:
+        case 36:
           this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 36:
+        case 37:
           this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 37:
+        case 38:
           this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 38:
+        case 39:
           this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 39:
+        case 40:
           this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 40:
+        case 41:
           this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 41:
+        case 42:
           this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 42:
+        case 43:
           this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 43:
+        case 44:
           this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 44:
+        case 45:
           this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 45:
+        case 46:
           this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 46:
+        case 47:
           this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 47:
+        case 48:
           this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 48:
+        case 49:
           this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 49:
+        case 50:
           this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 50:
+        case 51:
           this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 51:
+        case 52:
           this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 52:
+        case 53:
           this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 53:
+        case 54:
           this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 54:
+        case 55:
           this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 55:
+        case 56:
           this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
           return;
-        case 56:
+        case 57:
           this.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request,
             com.google.protobuf.RpcUtil.specializeCallback(
               done));
@@ -62924,76 +63749,78 @@ public final class MasterProtos {
         case 20:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance();
         case 21:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance();
         case 22:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
         case 23:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
         case 24:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
         case 25:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
         case 26:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
         case 27:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
         case 28:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
         case 29:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
         case 30:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
         case 31:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
         case 32:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
         case 33:
-          return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
         case 34:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
         case 35:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
         case 36:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
         case 37:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
         case 38:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
         case 39:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
         case 40:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
         case 41:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
         case 42:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
         case 43:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
         case 44:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
         case 45:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
         case 46:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
         case 47:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
         case 48:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
         case 49:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
         case 50:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
         case 51:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
         case 52:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
         case 53:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
         case 54:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
         case 55:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
         case 56:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+        case 57:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -63052,76 +63879,78 @@ public final class MasterProtos {
         case 20:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance();
         case 21:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance();
         case 22:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
         case 23:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
         case 24:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
         case 25:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
         case 26:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
         case 27:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
         case 28:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
         case 29:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
         case 30:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
         case 31:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
         case 32:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
         case 33:
-          return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
         case 34:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
         case 35:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
         case 36:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
         case 37:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
         case 38:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
         case 39:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
         case 40:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
         case 41:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
         case 42:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
         case 43:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
         case 44:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
         case 45:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
         case 46:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
         case 47:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
         case 48:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
         case 49:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
         case 50:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
         case 51:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
         case 52:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
         case 53:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
         case 54:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
         case 55:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
         case 56:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+        case 57:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -63459,12 +64288,27 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()));
       }
 
+      public  void isMasterInMaintenanceMode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(21),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()));
+      }
+
       public  void balance(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(21),
+          getDescriptor().getMethods().get(22),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(),
@@ -63479,7 +64323,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(22),
+          getDescriptor().getMethods().get(23),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(),
@@ -63494,7 +64338,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(23),
+          getDescriptor().getMethods().get(24),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(),
@@ -63509,7 +64353,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(24),
+          getDescriptor().getMethods().get(25),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(),
@@ -63524,7 +64368,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(25),
+          getDescriptor().getMethods().get(26),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(),
@@ -63539,7 +64383,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(26),
+          getDescriptor().getMethods().get(27),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(),
@@ -63554,7 +64398,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(27),
+          getDescriptor().getMethods().get(28),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(),
@@ -63569,7 +64413,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(28),
+          getDescriptor().getMethods().get(29),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(),
@@ -63584,7 +64428,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(29),
+          getDescriptor().getMethods().get(30),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(),
@@ -63599,7 +64443,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(30),
+          getDescriptor().getMethods().get(31),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(),
@@ -63614,7 +64458,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(31),
+          getDescriptor().getMethods().get(32),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(),
@@ -63629,7 +64473,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(32),
+          getDescriptor().getMethods().get(33),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(),
@@ -63644,7 +64488,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(33),
+          getDescriptor().getMethods().get(34),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(),
@@ -63659,7 +64503,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(34),
+          getDescriptor().getMethods().get(35),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(),
@@ -63674,7 +64518,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(35),
+          getDescriptor().getMethods().get(36),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(),
@@ -63689,7 +64533,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(36),
+          getDescriptor().getMethods().get(37),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(),
@@ -63704,7 +64548,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(37),
+          getDescriptor().getMethods().get(38),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(),
@@ -63719,7 +64563,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(38),
+          getDescriptor().getMethods().get(39),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(),
@@ -63734,7 +64578,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(39),
+          getDescriptor().getMethods().get(40),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
@@ -63749,7 +64593,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(40),
+          getDescriptor().getMethods().get(41),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
@@ -63764,7 +64608,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(41),
+          getDescriptor().getMethods().get(42),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(),
@@ -63779,7 +64623,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(42),
+          getDescriptor().getMethods().get(43),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(),
@@ -63794,7 +64638,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(43),
+          getDescriptor().getMethods().get(44),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(),
@@ -63809,7 +64653,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(44),
+          getDescriptor().getMethods().get(45),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(),
@@ -63824,7 +64668,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(45),
+          getDescriptor().getMethods().get(46),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(),
@@ -63839,7 +64683,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(46),
+          getDescriptor().getMethods().get(47),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(),
@@ -63854,7 +64698,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(47),
+          getDescriptor().getMethods().get(48),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(),
@@ -63869,7 +64713,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(48),
+          getDescriptor().getMethods().get(49),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(),
@@ -63884,7 +64728,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(49),
+          getDescriptor().getMethods().get(50),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(),
@@ -63899,7 +64743,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(50),
+          getDescriptor().getMethods().get(51),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(),
@@ -63914,7 +64758,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(51),
+          getDescriptor().getMethods().get(52),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
@@ -63929,7 +64773,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(52),
+          getDescriptor().getMethods().get(53),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
@@ -63944,7 +64788,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(53),
+          getDescriptor().getMethods().get(54),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(),
@@ -63959,7 +64803,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(54),
+          getDescriptor().getMethods().get(55),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(),
@@ -63974,7 +64818,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(55),
+          getDescriptor().getMethods().get(56),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(),
@@ -63989,7 +64833,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request,
           com.google.protobuf.RpcCallback done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(56),
+          getDescriptor().getMethods().get(57),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(),
@@ -64111,6 +64955,11 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request)
           throws com.google.protobuf.ServiceException;
 
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request)
+          throws com.google.protobuf.ServiceException;
+
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request)
@@ -64551,12 +65400,24 @@ public final class MasterProtos {
       }
 
 
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(21),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance());
+      }
+
+
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(21),
+          getDescriptor().getMethods().get(22),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance());
@@ -64568,7 +65429,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(22),
+          getDescriptor().getMethods().get(23),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance());
@@ -64580,7 +65441,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(23),
+          getDescriptor().getMethods().get(24),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance());
@@ -64592,7 +65453,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(24),
+          getDescriptor().getMethods().get(25),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance());
@@ -64604,7 +65465,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(25),
+          getDescriptor().getMethods().get(26),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance());
@@ -64616,7 +65477,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(26),
+          getDescriptor().getMethods().get(27),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance());
@@ -64628,7 +65489,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(27),
+          getDescriptor().getMethods().get(28),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance());
@@ -64640,7 +65501,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(28),
+          getDescriptor().getMethods().get(29),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance());
@@ -64652,7 +65513,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(29),
+          getDescriptor().getMethods().get(30),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance());
@@ -64664,7 +65525,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(30),
+          getDescriptor().getMethods().get(31),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance());
@@ -64676,7 +65537,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(31),
+          getDescriptor().getMethods().get(32),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance());
@@ -64688,7 +65549,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(32),
+          getDescriptor().getMethods().get(33),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance());
@@ -64700,7 +65561,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(33),
+          getDescriptor().getMethods().get(34),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance());
@@ -64712,7 +65573,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(34),
+          getDescriptor().getMethods().get(35),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance());
@@ -64724,7 +65585,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(35),
+          getDescriptor().getMethods().get(36),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance());
@@ -64736,7 +65597,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(36),
+          getDescriptor().getMethods().get(37),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance());
@@ -64748,7 +65609,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(37),
+          getDescriptor().getMethods().get(38),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance());
@@ -64760,7 +65621,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(38),
+          getDescriptor().getMethods().get(39),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance());
@@ -64772,7 +65633,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(39),
+          getDescriptor().getMethods().get(40),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
@@ -64784,7 +65645,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(40),
+          getDescriptor().getMethods().get(41),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
@@ -64796,7 +65657,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(41),
+          getDescriptor().getMethods().get(42),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance());
@@ -64808,7 +65669,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(42),
+          getDescriptor().getMethods().get(43),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance());
@@ -64820,7 +65681,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(43),
+          getDescriptor().getMethods().get(44),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance());
@@ -64832,7 +65693,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(44),
+          getDescriptor().getMethods().get(45),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance());
@@ -64844,7 +65705,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(45),
+          getDescriptor().getMethods().get(46),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance());
@@ -64856,7 +65717,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(46),
+          getDescriptor().getMethods().get(47),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance());
@@ -64868,7 +65729,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(47),
+          getDescriptor().getMethods().get(48),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance());
@@ -64880,7 +65741,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(48),
+          getDescriptor().getMethods().get(49),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance());
@@ -64892,7 +65753,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(49),
+          getDescriptor().getMethods().get(50),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance());
@@ -64904,7 +65765,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(50),
+          getDescriptor().getMethods().get(51),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance());
@@ -64916,7 +65777,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(51),
+          getDescriptor().getMethods().get(52),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -64928,7 +65789,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(52),
+          getDescriptor().getMethods().get(53),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -64940,7 +65801,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(53),
+          getDescriptor().getMethods().get(54),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance());
@@ -64952,7 +65813,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(54),
+          getDescriptor().getMethods().get(55),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance());
@@ -64964,7 +65825,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(55),
+          getDescriptor().getMethods().get(56),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance());
@@ -64976,7 +65837,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(56),
+          getDescriptor().getMethods().get(57),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
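The method-index bumps in the hunks above are the mechanical consequence of one new RPC, IsMasterInMaintenanceMode, being registered in MasterService between StopMaster and Balance, so every later getMethods() slot moves up by one. As a hedged sketch only (this wrapper itself is not shown in this excerpt; its shape follows the surrounding pattern, and slot 21 is inferred from the service ordering encoded in the descriptor string below):

      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request)
          throws com.google.protobuf.ServiceException {
        // Same blocking-stub pattern as the renumbered methods above; the new
        // method occupies the slot immediately after StopMaster (inferred, not
        // shown in this excerpt).
        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod(
            getDescriptor().getMethods().get(21),
            controller,
            request,
            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance());
      }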
@@ -65218,6 +66079,16 @@ public final class MasterProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_StopMasterResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_BalanceRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -65636,251 +66507,256 @@ public final class MasterProtos {
       "aceResponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.p" +
       "b.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shutdo" +
       "wnResponse\"\023\n\021StopMasterRequest\"\024\n\022StopM" +
-      "asterResponse\"\037\n\016BalanceRequest\022\r\n\005force" +
-      "\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" +
-      "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" +
-      "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" +
-      "ncerRunningResponse\022\032\n\022prev_balance_valu",
-      "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" +
-      "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" +
-      "\010\"\212\001\n\035SetSplitOrMergeEnabledRequest\022\017\n\007e" +
-      "nabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swi" +
-      "tch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchT" +
-      "ype\022\021\n\tskip_lock\030\004 \001(\010\"4\n\036SetSplitOrMerg" +
-      "eEnabledResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034" +
-      "IsSplitOrMergeEnabledRequest\022/\n\013switch_t" +
-      "ype\030\001 \002(\0162\032.hbase.pb.MasterSwitchType\"0\n" +
-      "\035IsSplitOrMergeEnabledResponse\022\017\n\007enable",
-      "d\030\001 \002(\010\"+\n)ReleaseSplitOrMergeLockAndRol" +
-      "lbackRequest\",\n*ReleaseSplitOrMergeLockA" +
-      "ndRollbackResponse\"\022\n\020NormalizeRequest\"+" +
-      "\n\021NormalizeResponse\022\026\n\016normalizer_ran\030\001 " +
-      "\002(\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on" +
-      "\030\001 \002(\010\"=\n\034SetNormalizerRunningResponse\022\035" +
-      "\n\025prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNorma" +
-      "lizerEnabledRequest\".\n\033IsNormalizerEnabl" +
-      "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" +
-      "gScanRequest\"-\n\026RunCatalogScanResponse\022\023",
-      "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" +
-      "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" +
-      "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " +
-      "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa" +
-      "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" +
-      "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" +
-      "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" +
-      "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" +
-      "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" +
-      "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013",
-      "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" +
-      "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" +
-      "se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" +
-      "hotResponse\"s\n\026RestoreSnapshotRequest\022/\n" +
-      "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" +
-      "iption\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
-      "\003 \001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007p" +
-      "roc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n" +
-      "\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescr" +
-      "iption\"^\n\026IsSnapshotDoneResponse\022\023\n\004done",
-      "\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase." +
-      "pb.SnapshotDescription\"O\n\034IsRestoreSnaps" +
-      "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" +
-      ".pb.SnapshotDescription\"4\n\035IsRestoreSnap" +
-      "shotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n" +
-      "\033GetSchemaAlterStatusRequest\022\'\n\ntable_na" +
-      "me\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSche" +
-      "maAlterStatusResponse\022\035\n\025yet_to_update_r" +
-      "egions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032" +
-      "GetTableDescriptorsRequest\022(\n\013table_name",
-      "s\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 " +
-      "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" +
-      "\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescriptors" +
-      "Response\022+\n\014table_schema\030\001 \003(\0132\025.hbase.p" +
-      "b.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" +
-      "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" +
-      ":\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNa" +
-      "mesResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase" +
-      ".pb.TableName\"?\n\024GetTableStateRequest\022\'\n" +
-      "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"B",
-      "\n\025GetTableStateResponse\022)\n\013table_state\030\001" +
-      " \002(\0132\024.hbase.pb.TableState\"\031\n\027GetCluster" +
-      "StatusRequest\"K\n\030GetClusterStatusRespons" +
-      "e\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clu" +
-      "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" +
-      "IsMasterRunningResponse\022\031\n\021is_master_run" +
-      "ning\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tpr" +
-      "ocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDescri" +
-      "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" +
-      "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n",
-      "\026IsProcedureDoneRequest\0221\n\tprocedure\030\001 \001" +
-      "(\0132\036.hbase.pb.ProcedureDescription\"`\n\027Is" +
-      "ProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" +
-      "se\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Procedur" +
-      "eDescription\",\n\031GetProcedureResultReques" +
-      "t\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResul" +
-      "tResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetP" +
-      "rocedureResultResponse.State\022\022\n\nstart_ti" +
-      "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" +
-      "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore",
-      "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" +
-      "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" +
-      "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI" +
-      "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr" +
-      "ocedureResponse\022\034\n\024is_procedure_aborted\030" +
-      "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" +
-      "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" +
-      "ase.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\t" +
-      "user_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tn" +
-      "amespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hba",
-      "se.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" +
-      "ypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031." +
-      "hbase.pb.ThrottleRequest\"\022\n\020SetQuotaResp" +
-      "onse\"J\n\037MajorCompactionTimestampRequest\022" +
-      "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName" +
-      "\"U\n(MajorCompactionTimestampForRegionReq" +
-      "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
-      "ecifier\"@\n MajorCompactionTimestampRespo" +
-      "nse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Sec" +
-      "urityCapabilitiesRequest\"\354\001\n\034SecurityCap",
-      "abilitiesResponse\022G\n\014capabilities\030\001 \003(\0162" +
-      "1.hbase.pb.SecurityCapabilitiesResponse." +
-      "Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTH" +
-      "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022" +
-      "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" +
-      "\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchT" +
-      "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\373(\n\rMasterServ" +
-      "ice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb.G" +
-      "etSchemaAlterStatusRequest\032&.hbase.pb.Ge" +
-      "tSchemaAlterStatusResponse\022b\n\023GetTableDe",
-      "scriptors\022$.hbase.pb.GetTableDescriptors" +
-      "Request\032%.hbase.pb.GetTableDescriptorsRe" +
-      "sponse\022P\n\rGetTableNames\022\036.hbase.pb.GetTa" +
-      "bleNamesRequest\032\037.hbase.pb.GetTableNames" +
-      "Response\022Y\n\020GetClusterStatus\022!.hbase.pb." +
-      "GetClusterStatusRequest\032\".hbase.pb.GetCl" +
-      "usterStatusResponse\022V\n\017IsMasterRunning\022 " +
-      ".hbase.pb.IsMasterRunningRequest\032!.hbase" +
-      ".pb.IsMasterRunningResponse\022D\n\tAddColumn" +
-      "\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb.",
-      "AddColumnResponse\022M\n\014DeleteColumn\022\035.hbas" +
-      "e.pb.DeleteColumnRequest\032\036.hbase.pb.Dele" +
-      "teColumnResponse\022M\n\014ModifyColumn\022\035.hbase" +
-      ".pb.ModifyColumnRequest\032\036.hbase.pb.Modif" +
-      "yColumnResponse\022G\n\nMoveRegion\022\033.hbase.pb" +
-      ".MoveRegionRequest\032\034.hbase.pb.MoveRegion" +
-      "Response\022k\n\026DispatchMergingRegions\022\'.hba" +
-      "se.pb.DispatchMergingRegionsRequest\032(.hb" +
-      "ase.pb.DispatchMergingRegionsResponse\022M\n" +
-      "\014AssignRegion\022\035.hbase.pb.AssignRegionReq",
-      "uest\032\036.hbase.pb.AssignRegionResponse\022S\n\016" +
-      "UnassignRegion\022\037.hbase.pb.UnassignRegion" +
-      "Request\032 .hbase.pb.UnassignRegionRespons" +
-      "e\022P\n\rOfflineRegion\022\036.hbase.pb.OfflineReg" +
-      "ionRequest\032\037.hbase.pb.OfflineRegionRespo" +
-      "nse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteTabl" +
-      "eRequest\032\035.hbase.pb.DeleteTableResponse\022" +
-      "P\n\rtruncateTable\022\036.hbase.pb.TruncateTabl" +
-      "eRequest\032\037.hbase.pb.TruncateTableRespons" +
-      "e\022J\n\013EnableTable\022\034.hbase.pb.EnableTableR",
-      "equest\032\035.hbase.pb.EnableTableResponse\022M\n" +
-      "\014DisableTable\022\035.hbase.pb.DisableTableReq" +
-      "uest\032\036.hbase.pb.DisableTableResponse\022J\n\013" +
-      "ModifyTable\022\034.hbase.pb.ModifyTableReques" +
-      "t\032\035.hbase.pb.ModifyTableResponse\022J\n\013Crea" +
-      "teTable\022\034.hbase.pb.CreateTableRequest\032\035." +
-      "hbase.pb.CreateTableResponse\022A\n\010Shutdown" +
-      "\022\031.hbase.pb.ShutdownRequest\032\032.hbase.pb.S" +
-      "hutdownResponse\022G\n\nStopMaster\022\033.hbase.pb" +
-      ".StopMasterRequest\032\034.hbase.pb.StopMaster",
-      "Response\022>\n\007Balance\022\030.hbase.pb.BalanceRe" +
-      "quest\032\031.hbase.pb.BalanceResponse\022_\n\022SetB" +
-      "alancerRunning\022#.hbase.pb.SetBalancerRun" +
-      "ningRequest\032$.hbase.pb.SetBalancerRunnin" +
-      "gResponse\022\\\n\021IsBalancerEnabled\022\".hbase.p" +
-      "b.IsBalancerEnabledRequest\032#.hbase.pb.Is" +
-      "BalancerEnabledResponse\022k\n\026SetSplitOrMer" +
-      "geEnabled\022\'.hbase.pb.SetSplitOrMergeEnab" +
-      "ledRequest\032(.hbase.pb.SetSplitOrMergeEna" +
-      "bledResponse\022h\n\025IsSplitOrMergeEnabled\022&.",
-      "hbase.pb.IsSplitOrMergeEnabledRequest\032\'." +
-      "hbase.pb.IsSplitOrMergeEnabledResponse\022\217" +
-      "\001\n\"ReleaseSplitOrMergeLockAndRollback\0223." +
-      "hbase.pb.ReleaseSplitOrMergeLockAndRollb" +
-      "ackRequest\0324.hbase.pb.ReleaseSplitOrMerg" +
-      "eLockAndRollbackResponse\022D\n\tNormalize\022\032." +
-      "hbase.pb.NormalizeRequest\032\033.hbase.pb.Nor" +
-      "malizeResponse\022e\n\024SetNormalizerRunning\022%" +
-      ".hbase.pb.SetNormalizerRunningRequest\032&." +
-      "hbase.pb.SetNormalizerRunningResponse\022b\n",
-      "\023IsNormalizerEnabled\022$.hbase.pb.IsNormal" +
-      "izerEnabledRequest\032%.hbase.pb.IsNormaliz" +
-      "erEnabledResponse\022S\n\016RunCatalogScan\022\037.hb" +
-      "ase.pb.RunCatalogScanRequest\032 .hbase.pb." +
-      "RunCatalogScanResponse\022e\n\024EnableCatalogJ" +
-      "anitor\022%.hbase.pb.EnableCatalogJanitorRe" +
-      "quest\032&.hbase.pb.EnableCatalogJanitorRes" +
-      "ponse\022n\n\027IsCatalogJanitorEnabled\022(.hbase" +
-      ".pb.IsCatalogJanitorEnabledRequest\032).hba" +
-      "se.pb.IsCatalogJanitorEnabledResponse\022^\n",
-      "\021ExecMasterService\022#.hbase.pb.Coprocesso" +
-      "rServiceRequest\032$.hbase.pb.CoprocessorSe" +
-      "rviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sna" +
-      "pshotRequest\032\032.hbase.pb.SnapshotResponse" +
-      "\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Get" +
-      "CompletedSnapshotsRequest\032\'.hbase.pb.Get" +
-      "CompletedSnapshotsResponse\022S\n\016DeleteSnap" +
-      "shot\022\037.hbase.pb.DeleteSnapshotRequest\032 ." +
-      "hbase.pb.DeleteSnapshotResponse\022S\n\016IsSna" +
-      "pshotDone\022\037.hbase.pb.IsSnapshotDoneReque",
-      "st\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017" +
-      "RestoreSnapshot\022 .hbase.pb.RestoreSnapsh" +
-      "otRequest\032!.hbase.pb.RestoreSnapshotResp" +
-      "onse\022P\n\rExecProcedure\022\036.hbase.pb.ExecPro" +
-      "cedureRequest\032\037.hbase.pb.ExecProcedureRe" +
-      "sponse\022W\n\024ExecProcedureWithRet\022\036.hbase.p" +
-      "b.ExecProcedureRequest\032\037.hbase.pb.ExecPr" +
-      "ocedureResponse\022V\n\017IsProcedureDone\022 .hba" +
-      "se.pb.IsProcedureDoneRequest\032!.hbase.pb." +
-      "IsProcedureDoneResponse\022V\n\017ModifyNamespa",
-      "ce\022 .hbase.pb.ModifyNamespaceRequest\032!.h" +
-      "base.pb.ModifyNamespaceResponse\022V\n\017Creat" +
-      "eNamespace\022 .hbase.pb.CreateNamespaceReq" +
-      "uest\032!.hbase.pb.CreateNamespaceResponse\022" +
-      "V\n\017DeleteNamespace\022 .hbase.pb.DeleteName" +
-      "spaceRequest\032!.hbase.pb.DeleteNamespaceR" +
-      "esponse\022k\n\026GetNamespaceDescriptor\022\'.hbas" +
-      "e.pb.GetNamespaceDescriptorRequest\032(.hba" +
-      "se.pb.GetNamespaceDescriptorResponse\022q\n\030" +
-      "ListNamespaceDescriptors\022).hbase.pb.List",
-      "NamespaceDescriptorsRequest\032*.hbase.pb.L" +
-      "istNamespaceDescriptorsResponse\022\206\001\n\037List" +
-      "TableDescriptorsByNamespace\0220.hbase.pb.L" +
-      "istTableDescriptorsByNamespaceRequest\0321." +
-      "hbase.pb.ListTableDescriptorsByNamespace" +
-      "Response\022t\n\031ListTableNamesByNamespace\022*." +
-      "hbase.pb.ListTableNamesByNamespaceReques" +
-      "t\032+.hbase.pb.ListTableNamesByNamespaceRe" +
-      "sponse\022P\n\rGetTableState\022\036.hbase.pb.GetTa" +
-      "bleStateRequest\032\037.hbase.pb.GetTableState",
-      "Response\022A\n\010SetQuota\022\031.hbase.pb.SetQuota" +
-      "Request\032\032.hbase.pb.SetQuotaResponse\022x\n\037g" +
-      "etLastMajorCompactionTimestamp\022).hbase.p" +
-      "b.MajorCompactionTimestampRequest\032*.hbas" +
-      "e.pb.MajorCompactionTimestampResponse\022\212\001" +
-      "\n(getLastMajorCompactionTimestampForRegi" +
-      "on\0222.hbase.pb.MajorCompactionTimestampFo" +
-      "rRegionRequest\032*.hbase.pb.MajorCompactio" +
-      "nTimestampResponse\022_\n\022getProcedureResult" +
-      "\022#.hbase.pb.GetProcedureResultRequest\032$.",
-      "hbase.pb.GetProcedureResultResponse\022h\n\027g" +
-      "etSecurityCapabilities\022%.hbase.pb.Securi" +
-      "tyCapabilitiesRequest\032&.hbase.pb.Securit" +
-      "yCapabilitiesResponse\022S\n\016AbortProcedure\022" +
-      "\037.hbase.pb.AbortProcedureRequest\032 .hbase" +
-      ".pb.AbortProcedureResponse\022S\n\016ListProced" +
-      "ures\022\037.hbase.pb.ListProceduresRequest\032 ." +
-      "hbase.pb.ListProceduresResponseBB\n*org.a" +
-      "pache.hadoop.hbase.protobuf.generatedB\014M" +
-      "asterProtosH\001\210\001\001\240\001\001"
+      "asterResponse\"\034\n\032IsInMaintenanceModeRequ" +
+      "est\"8\n\033IsInMaintenanceModeResponse\022\031\n\021in" +
+      "MaintenanceMode\030\001 \002(\010\"\037\n\016BalanceRequest\022" +
+      "\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014bal" +
+      "ancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunningReq",
+      "uest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n" +
+      "\032SetBalancerRunningResponse\022\032\n\022prev_bala" +
+      "nce_value\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequ" +
+      "est\",\n\031IsBalancerEnabledResponse\022\017\n\007enab" +
+      "led\030\001 \002(\010\"\212\001\n\035SetSplitOrMergeEnabledRequ" +
+      "est\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(" +
+      "\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb.Maste" +
+      "rSwitchType\022\021\n\tskip_lock\030\004 \001(\010\"4\n\036SetSpl" +
+      "itOrMergeEnabledResponse\022\022\n\nprev_value\030\001" +
+      " \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022/\n\013",
+      "switch_type\030\001 \002(\0162\032.hbase.pb.MasterSwitc" +
+      "hType\"0\n\035IsSplitOrMergeEnabledResponse\022\017" +
+      "\n\007enabled\030\001 \002(\010\"+\n)ReleaseSplitOrMergeLo" +
+      "ckAndRollbackRequest\",\n*ReleaseSplitOrMe" +
+      "rgeLockAndRollbackResponse\"\022\n\020NormalizeR" +
+      "equest\"+\n\021NormalizeResponse\022\026\n\016normalize" +
+      "r_ran\030\001 \002(\010\")\n\033SetNormalizerRunningReque" +
+      "st\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizerRunningRe" +
+      "sponse\022\035\n\025prev_normalizer_value\030\001 \001(\010\"\034\n" +
+      "\032IsNormalizerEnabledRequest\".\n\033IsNormali",
+      "zerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025R" +
+      "unCatalogScanRequest\"-\n\026RunCatalogScanRe" +
+      "sponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableCat" +
+      "alogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034En" +
+      "ableCatalogJanitorResponse\022\022\n\nprev_value" +
+      "\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledRequest" +
+      "\"0\n\037IsCatalogJanitorEnabledResponse\022\r\n\005v" +
+      "alue\030\001 \002(\010\"B\n\017SnapshotRequest\022/\n\010snapsho" +
+      "t\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\"," +
+      "\n\020SnapshotResponse\022\030\n\020expected_timeout\030\001",
+      " \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"Q\n\035" +
+      "GetCompletedSnapshotsResponse\0220\n\tsnapsho" +
+      "ts\030\001 \003(\0132\035.hbase.pb.SnapshotDescription\"" +
+      "H\n\025DeleteSnapshotRequest\022/\n\010snapshot\030\001 \002" +
+      "(\0132\035.hbase.pb.SnapshotDescription\"\030\n\026Del" +
+      "eteSnapshotResponse\"s\n\026RestoreSnapshotRe" +
+      "quest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snaps" +
+      "hotDescription\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020" +
+      "\n\005nonce\030\003 \001(\004:\0010\"*\n\027RestoreSnapshotRespo" +
+      "nse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRe",
+      "quest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snaps" +
+      "hotDescription\"^\n\026IsSnapshotDoneResponse" +
+      "\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132" +
+      "\035.hbase.pb.SnapshotDescription\"O\n\034IsRest" +
+      "oreSnapshotDoneRequest\022/\n\010snapshot\030\001 \001(\013" +
+      "2\035.hbase.pb.SnapshotDescription\"4\n\035IsRes" +
+      "toreSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005" +
+      "false\"F\n\033GetSchemaAlterStatusRequest\022\'\n\n" +
+      "table_name\030\001 \002(\0132\023.hbase.pb.TableName\"T\n" +
+      "\034GetSchemaAlterStatusResponse\022\035\n\025yet_to_",
+      "update_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 " +
+      "\001(\r\"\213\001\n\032GetTableDescriptorsRequest\022(\n\013ta" +
+      "ble_names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005" +
+      "regex\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:" +
+      "\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDes" +
+      "criptorsResponse\022+\n\014table_schema\030\001 \003(\0132\025" +
+      ".hbase.pb.TableSchema\"[\n\024GetTableNamesRe" +
+      "quest\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_tabl" +
+      "es\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025Ge" +
+      "tTableNamesResponse\022(\n\013table_names\030\001 \003(\013",
+      "2\023.hbase.pb.TableName\"?\n\024GetTableStateRe" +
+      "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" +
+      "leName\"B\n\025GetTableStateResponse\022)\n\013table" +
+      "_state\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027Ge" +
+      "tClusterStatusRequest\"K\n\030GetClusterStatu" +
+      "sResponse\022/\n\016cluster_status\030\001 \002(\0132\027.hbas" +
+      "e.pb.ClusterStatus\"\030\n\026IsMasterRunningReq" +
+      "uest\"4\n\027IsMasterRunningResponse\022\031\n\021is_ma" +
+      "ster_running\030\001 \002(\010\"I\n\024ExecProcedureReque" +
+      "st\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb.Procedu",
+      "reDescription\"F\n\025ExecProcedureResponse\022\030" +
+      "\n\020expected_timeout\030\001 \001(\003\022\023\n\013return_data\030" +
+      "\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221\n\tproce" +
+      "dure\030\001 \001(\0132\036.hbase.pb.ProcedureDescripti" +
+      "on\"`\n\027IsProcedureDoneResponse\022\023\n\004done\030\001 " +
+      "\001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb." +
+      "ProcedureDescription\",\n\031GetProcedureResu" +
+      "ltRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProced" +
+      "ureResultResponse\0229\n\005state\030\001 \002(\0162*.hbase" +
+      ".pb.GetProcedureResultResponse.State\022\022\n\n",
+      "start_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n" +
+      "\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase" +
+      ".pb.ForeignExceptionMessage\"1\n\005State\022\r\n\t" +
+      "NOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M" +
+      "\n\025AbortProcedureRequest\022\017\n\007proc_id\030\001 \002(\004" +
+      "\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004true\"6\n" +
+      "\026AbortProcedureResponse\022\034\n\024is_procedure_" +
+      "aborted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"@" +
+      "\n\026ListProceduresResponse\022&\n\tprocedure\030\001 " +
+      "\003(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaReq",
+      "uest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 " +
+      "\001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001" +
+      "(\0132\023.hbase.pb.TableName\022\022\n\nremove_all\030\005 " +
+      "\001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle\030" +
+      "\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020SetQ" +
+      "uotaResponse\"J\n\037MajorCompactionTimestamp" +
+      "Request\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.T" +
+      "ableName\"U\n(MajorCompactionTimestampForR" +
+      "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
+      "RegionSpecifier\"@\n MajorCompactionTimest",
+      "ampResponse\022\034\n\024compaction_timestamp\030\001 \002(" +
+      "\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034Sec" +
+      "urityCapabilitiesResponse\022G\n\014capabilitie" +
+      "s\030\001 \003(\01621.hbase.pb.SecurityCapabilitiesR" +
+      "esponse.Capability\"\202\001\n\nCapability\022\031\n\025SIM" +
+      "PLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTIC" +
+      "ATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHO" +
+      "RIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020Maste" +
+      "rSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\345)\n\rMa" +
+      "sterService\022e\n\024GetSchemaAlterStatus\022%.hb",
+      "ase.pb.GetSchemaAlterStatusRequest\032&.hba" +
+      "se.pb.GetSchemaAlterStatusResponse\022b\n\023Ge" +
+      "tTableDescriptors\022$.hbase.pb.GetTableDes" +
+      "criptorsRequest\032%.hbase.pb.GetTableDescr" +
+      "iptorsResponse\022P\n\rGetTableNames\022\036.hbase." +
+      "pb.GetTableNamesRequest\032\037.hbase.pb.GetTa" +
+      "bleNamesResponse\022Y\n\020GetClusterStatus\022!.h" +
+      "base.pb.GetClusterStatusRequest\032\".hbase." +
+      "pb.GetClusterStatusResponse\022V\n\017IsMasterR" +
+      "unning\022 .hbase.pb.IsMasterRunningRequest",
+      "\032!.hbase.pb.IsMasterRunningResponse\022D\n\tA" +
+      "ddColumn\022\032.hbase.pb.AddColumnRequest\032\033.h" +
+      "base.pb.AddColumnResponse\022M\n\014DeleteColum" +
+      "n\022\035.hbase.pb.DeleteColumnRequest\032\036.hbase" +
+      ".pb.DeleteColumnResponse\022M\n\014ModifyColumn" +
+      "\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase." +
+      "pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033." +
+      "hbase.pb.MoveRegionRequest\032\034.hbase.pb.Mo" +
+      "veRegionResponse\022k\n\026DispatchMergingRegio" +
+      "ns\022\'.hbase.pb.DispatchMergingRegionsRequ",
+      "est\032(.hbase.pb.DispatchMergingRegionsRes" +
+      "ponse\022M\n\014AssignRegion\022\035.hbase.pb.AssignR" +
+      "egionRequest\032\036.hbase.pb.AssignRegionResp" +
+      "onse\022S\n\016UnassignRegion\022\037.hbase.pb.Unassi" +
+      "gnRegionRequest\032 .hbase.pb.UnassignRegio" +
+      "nResponse\022P\n\rOfflineRegion\022\036.hbase.pb.Of" +
+      "flineRegionRequest\032\037.hbase.pb.OfflineReg" +
+      "ionResponse\022J\n\013DeleteTable\022\034.hbase.pb.De" +
+      "leteTableRequest\032\035.hbase.pb.DeleteTableR" +
+      "esponse\022P\n\rtruncateTable\022\036.hbase.pb.Trun",
+      "cateTableRequest\032\037.hbase.pb.TruncateTabl" +
+      "eResponse\022J\n\013EnableTable\022\034.hbase.pb.Enab" +
+      "leTableRequest\032\035.hbase.pb.EnableTableRes" +
+      "ponse\022M\n\014DisableTable\022\035.hbase.pb.Disable" +
+      "TableRequest\032\036.hbase.pb.DisableTableResp" +
+      "onse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTab" +
+      "leRequest\032\035.hbase.pb.ModifyTableResponse" +
+      "\022J\n\013CreateTable\022\034.hbase.pb.CreateTableRe" +
+      "quest\032\035.hbase.pb.CreateTableResponse\022A\n\010" +
+      "Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.hb",
+      "ase.pb.ShutdownResponse\022G\n\nStopMaster\022\033." +
+      "hbase.pb.StopMasterRequest\032\034.hbase.pb.St" +
+      "opMasterResponse\022h\n\031IsMasterInMaintenanc" +
+      "eMode\022$.hbase.pb.IsInMaintenanceModeRequ" +
+      "est\032%.hbase.pb.IsInMaintenanceModeRespon" +
+      "se\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032" +
+      "\031.hbase.pb.BalanceResponse\022_\n\022SetBalance" +
+      "rRunning\022#.hbase.pb.SetBalancerRunningRe" +
+      "quest\032$.hbase.pb.SetBalancerRunningRespo" +
+      "nse\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBa",
+      "lancerEnabledRequest\032#.hbase.pb.IsBalanc" +
+      "erEnabledResponse\022k\n\026SetSplitOrMergeEnab" +
+      "led\022\'.hbase.pb.SetSplitOrMergeEnabledReq" +
+      "uest\032(.hbase.pb.SetSplitOrMergeEnabledRe" +
+      "sponse\022h\n\025IsSplitOrMergeEnabled\022&.hbase." +
+      "pb.IsSplitOrMergeEnabledRequest\032\'.hbase." +
+      "pb.IsSplitOrMergeEnabledResponse\022\217\001\n\"Rel" +
+      "easeSplitOrMergeLockAndRollback\0223.hbase." +
+      "pb.ReleaseSplitOrMergeLockAndRollbackReq" +
+      "uest\0324.hbase.pb.ReleaseSplitOrMergeLockA",
+      "ndRollbackResponse\022D\n\tNormalize\022\032.hbase." +
+      "pb.NormalizeRequest\032\033.hbase.pb.Normalize" +
+      "Response\022e\n\024SetNormalizerRunning\022%.hbase" +
+      ".pb.SetNormalizerRunningRequest\032&.hbase." +
+      "pb.SetNormalizerRunningResponse\022b\n\023IsNor" +
+      "malizerEnabled\022$.hbase.pb.IsNormalizerEn" +
+      "abledRequest\032%.hbase.pb.IsNormalizerEnab" +
+      "ledResponse\022S\n\016RunCatalogScan\022\037.hbase.pb" +
+      ".RunCatalogScanRequest\032 .hbase.pb.RunCat" +
+      "alogScanResponse\022e\n\024EnableCatalogJanitor",
+      "\022%.hbase.pb.EnableCatalogJanitorRequest\032" +
+      "&.hbase.pb.EnableCatalogJanitorResponse\022" +
+      "n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.Is" +
+      "CatalogJanitorEnabledRequest\032).hbase.pb." +
+      "IsCatalogJanitorEnabledResponse\022^\n\021ExecM" +
+      "asterService\022#.hbase.pb.CoprocessorServi" +
+      "ceRequest\032$.hbase.pb.CoprocessorServiceR" +
+      "esponse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotR" +
+      "equest\032\032.hbase.pb.SnapshotResponse\022h\n\025Ge" +
+      "tCompletedSnapshots\022&.hbase.pb.GetComple",
+      "tedSnapshotsRequest\032\'.hbase.pb.GetComple" +
+      "tedSnapshotsResponse\022S\n\016DeleteSnapshot\022\037" +
+      ".hbase.pb.DeleteSnapshotRequest\032 .hbase." +
+      "pb.DeleteSnapshotResponse\022S\n\016IsSnapshotD" +
+      "one\022\037.hbase.pb.IsSnapshotDoneRequest\032 .h" +
+      "base.pb.IsSnapshotDoneResponse\022V\n\017Restor" +
+      "eSnapshot\022 .hbase.pb.RestoreSnapshotRequ" +
+      "est\032!.hbase.pb.RestoreSnapshotResponse\022P" +
+      "\n\rExecProcedure\022\036.hbase.pb.ExecProcedure" +
+      "Request\032\037.hbase.pb.ExecProcedureResponse",
+      "\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exec" +
+      "ProcedureRequest\032\037.hbase.pb.ExecProcedur" +
+      "eResponse\022V\n\017IsProcedureDone\022 .hbase.pb." +
+      "IsProcedureDoneRequest\032!.hbase.pb.IsProc" +
+      "edureDoneResponse\022V\n\017ModifyNamespace\022 .h" +
+      "base.pb.ModifyNamespaceRequest\032!.hbase.p" +
+      "b.ModifyNamespaceResponse\022V\n\017CreateNames" +
+      "pace\022 .hbase.pb.CreateNamespaceRequest\032!" +
+      ".hbase.pb.CreateNamespaceResponse\022V\n\017Del" +
+      "eteNamespace\022 .hbase.pb.DeleteNamespaceR",
+      "equest\032!.hbase.pb.DeleteNamespaceRespons" +
+      "e\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb.G" +
+      "etNamespaceDescriptorRequest\032(.hbase.pb." +
+      "GetNamespaceDescriptorResponse\022q\n\030ListNa" +
+      "mespaceDescriptors\022).hbase.pb.ListNamesp" +
+      "aceDescriptorsRequest\032*.hbase.pb.ListNam" +
+      "espaceDescriptorsResponse\022\206\001\n\037ListTableD" +
+      "escriptorsByNamespace\0220.hbase.pb.ListTab" +
+      "leDescriptorsByNamespaceRequest\0321.hbase." +
+      "pb.ListTableDescriptorsByNamespaceRespon",
+      "se\022t\n\031ListTableNamesByNamespace\022*.hbase." +
+      "pb.ListTableNamesByNamespaceRequest\032+.hb" +
+      "ase.pb.ListTableNamesByNamespaceResponse" +
+      "\022P\n\rGetTableState\022\036.hbase.pb.GetTableSta" +
+      "teRequest\032\037.hbase.pb.GetTableStateRespon" +
+      "se\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReques" +
+      "t\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLast" +
+      "MajorCompactionTimestamp\022).hbase.pb.Majo" +
+      "rCompactionTimestampRequest\032*.hbase.pb.M" +
+      "ajorCompactionTimestampResponse\022\212\001\n(getL",
+      "astMajorCompactionTimestampForRegion\0222.h" +
+      "base.pb.MajorCompactionTimestampForRegio" +
+      "nRequest\032*.hbase.pb.MajorCompactionTimes" +
+      "tampResponse\022_\n\022getProcedureResult\022#.hba" +
+      "se.pb.GetProcedureResultRequest\032$.hbase." +
+      "pb.GetProcedureResultResponse\022h\n\027getSecu" +
+      "rityCapabilities\022%.hbase.pb.SecurityCapa" +
+      "bilitiesRequest\032&.hbase.pb.SecurityCapab" +
+      "ilitiesResponse\022S\n\016AbortProcedure\022\037.hbas" +
+      "e.pb.AbortProcedureRequest\032 .hbase.pb.Ab",
+      "ortProcedureResponse\022S\n\016ListProcedures\022\037" +
+      ".hbase.pb.ListProceduresRequest\032 .hbase." +
+      "pb.ListProceduresResponseBB\n*org.apache." +
+      "hadoop.hbase.protobuf.generatedB\014MasterP" +
+      "rotosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
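Read off the regenerated descriptor bytes above: IsInMaintenanceModeRequest carries no fields, IsInMaintenanceModeResponse carries a single required bool inMaintenanceMode (tag 1), and MasterService gains IsMasterInMaintenanceMode between StopMaster and Balance, which is why the message-type indices in the next hunk shift by two. A hedged usage sketch of the resulting generated API (illustrative only, not part of the generated file):

      // Request has no fields; response exposes the single bool field named
      // in the field accessor table below ("InMaintenanceMode").
      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest req =
          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.newBuilder().build();
      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse resp =
          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.newBuilder()
              .setInMaintenanceMode(false)   // required bool, tag 1
              .build();
      boolean inMaintenance = resp.getInMaintenanceMode();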
@@ -66163,392 +67039,404 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_StopMasterResponse_descriptor,
               new java.lang.String[] { });
-          internal_static_hbase_pb_BalanceRequest_descriptor =
+          internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor =
             getDescriptor().getMessageTypes().get(46);
+          internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor,
+              new java.lang.String[] { });
+          internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor =
+            getDescriptor().getMessageTypes().get(47);
+          internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor,
+              new java.lang.String[] { "InMaintenanceMode", });
+          internal_static_hbase_pb_BalanceRequest_descriptor =
+            getDescriptor().getMessageTypes().get(48);
           internal_static_hbase_pb_BalanceRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BalanceRequest_descriptor,
               new java.lang.String[] { "Force", });
           internal_static_hbase_pb_BalanceResponse_descriptor =
-            getDescriptor().getMessageTypes().get(47);
+            getDescriptor().getMessageTypes().get(49);
           internal_static_hbase_pb_BalanceResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_BalanceResponse_descriptor,
               new java.lang.String[] { "BalancerRan", });
           internal_static_hbase_pb_SetBalancerRunningRequest_descriptor =
-            getDescriptor().getMessageTypes().get(48);
+            getDescriptor().getMessageTypes().get(50);
           internal_static_hbase_pb_SetBalancerRunningRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetBalancerRunningRequest_descriptor,
               new java.lang.String[] { "On", "Synchronous", });
           internal_static_hbase_pb_SetBalancerRunningResponse_descriptor =
-            getDescriptor().getMessageTypes().get(49);
+            getDescriptor().getMessageTypes().get(51);
           internal_static_hbase_pb_SetBalancerRunningResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetBalancerRunningResponse_descriptor,
               new java.lang.String[] { "PrevBalanceValue", });
           internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor =
-            getDescriptor().getMessageTypes().get(50);
+            getDescriptor().getMessageTypes().get(52);
           internal_static_hbase_pb_IsBalancerEnabledRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor =
-            getDescriptor().getMessageTypes().get(51);
+            getDescriptor().getMessageTypes().get(53);
           internal_static_hbase_pb_IsBalancerEnabledResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor,
               new java.lang.String[] { "Enabled", });
           internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor =
-            getDescriptor().getMessageTypes().get(52);
+            getDescriptor().getMessageTypes().get(54);
           internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor,
               new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", "SkipLock", });
           internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor =
-            getDescriptor().getMessageTypes().get(53);
+            getDescriptor().getMessageTypes().get(55);
           internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor,
               new java.lang.String[] { "PrevValue", });
           internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor =
-            getDescriptor().getMessageTypes().get(54);
+            getDescriptor().getMessageTypes().get(56);
           internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor,
               new java.lang.String[] { "SwitchType", });
           internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor =
-            getDescriptor().getMessageTypes().get(55);
+            getDescriptor().getMessageTypes().get(57);
           internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor,
               new java.lang.String[] { "Enabled", });
           internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor =
-            getDescriptor().getMessageTypes().get(56);
+            getDescriptor().getMessageTypes().get(58);
           internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor =
-            getDescriptor().getMessageTypes().get(57);
+            getDescriptor().getMessageTypes().get(59);
           internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_NormalizeRequest_descriptor =
-            getDescriptor().getMessageTypes().get(58);
+            getDescriptor().getMessageTypes().get(60);
           internal_static_hbase_pb_NormalizeRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NormalizeRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_NormalizeResponse_descriptor =
-            getDescriptor().getMessageTypes().get(59);
+            getDescriptor().getMessageTypes().get(61);
           internal_static_hbase_pb_NormalizeResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_NormalizeResponse_descriptor,
               new java.lang.String[] { "NormalizerRan", });
           internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor =
-            getDescriptor().getMessageTypes().get(60);
+            getDescriptor().getMessageTypes().get(62);
           internal_static_hbase_pb_SetNormalizerRunningRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor,
               new java.lang.String[] { "On", });
           internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor =
-            getDescriptor().getMessageTypes().get(61);
+            getDescriptor().getMessageTypes().get(63);
           internal_static_hbase_pb_SetNormalizerRunningResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor,
               new java.lang.String[] { "PrevNormalizerValue", });
           internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor =
-            getDescriptor().getMessageTypes().get(62);
+            getDescriptor().getMessageTypes().get(64);
           internal_static_hbase_pb_IsNormalizerEnabledRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor =
-            getDescriptor().getMessageTypes().get(63);
+            getDescriptor().getMessageTypes().get(65);
           internal_static_hbase_pb_IsNormalizerEnabledResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor,
               new java.lang.String[] { "Enabled", });
           internal_static_hbase_pb_RunCatalogScanRequest_descriptor =
-            getDescriptor().getMessageTypes().get(64);
+            getDescriptor().getMessageTypes().get(66);
           internal_static_hbase_pb_RunCatalogScanRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RunCatalogScanRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_RunCatalogScanResponse_descriptor =
-            getDescriptor().getMessageTypes().get(65);
+            getDescriptor().getMessageTypes().get(67);
           internal_static_hbase_pb_RunCatalogScanResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RunCatalogScanResponse_descriptor,
               new java.lang.String[] { "ScanResult", });
           internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor =
-            getDescriptor().getMessageTypes().get(66);
+            getDescriptor().getMessageTypes().get(68);
           internal_static_hbase_pb_EnableCatalogJanitorRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor,
               new java.lang.String[] { "Enable", });
           internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor =
-            getDescriptor().getMessageTypes().get(67);
+            getDescriptor().getMessageTypes().get(69);
           internal_static_hbase_pb_EnableCatalogJanitorResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor,
               new java.lang.String[] { "PrevValue", });
           internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor =
-            getDescriptor().getMessageTypes().get(68);
+            getDescriptor().getMessageTypes().get(70);
           internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor =
-            getDescriptor().getMessageTypes().get(69);
+            getDescriptor().getMessageTypes().get(71);
           internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor,
               new java.lang.String[] { "Value", });
           internal_static_hbase_pb_SnapshotRequest_descriptor =
-            getDescriptor().getMessageTypes().get(70);
+            getDescriptor().getMessageTypes().get(72);
           internal_static_hbase_pb_SnapshotRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SnapshotRequest_descriptor,
               new java.lang.String[] { "Snapshot", });
           internal_static_hbase_pb_SnapshotResponse_descriptor =
-            getDescriptor().getMessageTypes().get(71);
+            getDescriptor().getMessageTypes().get(73);
           internal_static_hbase_pb_SnapshotResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SnapshotResponse_descriptor,
               new java.lang.String[] { "ExpectedTimeout", });
           internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor =
-            getDescriptor().getMessageTypes().get(72);
+            getDescriptor().getMessageTypes().get(74);
           internal_static_hbase_pb_GetCompletedSnapshotsRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor =
-            getDescriptor().getMessageTypes().get(73);
+            getDescriptor().getMessageTypes().get(75);
           internal_static_hbase_pb_GetCompletedSnapshotsResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor,
               new java.lang.String[] { "Snapshots", });
           internal_static_hbase_pb_DeleteSnapshotRequest_descriptor =
-            getDescriptor().getMessageTypes().get(74);
+            getDescriptor().getMessageTypes().get(76);
           internal_static_hbase_pb_DeleteSnapshotRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_DeleteSnapshotRequest_descriptor,
               new java.lang.String[] { "Snapshot", });
           internal_static_hbase_pb_DeleteSnapshotResponse_descriptor =
-            getDescriptor().getMessageTypes().get(75);
+            getDescriptor().getMessageTypes().get(77);
           internal_static_hbase_pb_DeleteSnapshotResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_DeleteSnapshotResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_RestoreSnapshotRequest_descriptor =
-            getDescriptor().getMessageTypes().get(76);
+            getDescriptor().getMessageTypes().get(78);
           internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RestoreSnapshotRequest_descriptor,
               new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", });
           internal_static_hbase_pb_RestoreSnapshotResponse_descriptor =
-            getDescriptor().getMessageTypes().get(77);
+            getDescriptor().getMessageTypes().get(79);
           internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RestoreSnapshotResponse_descriptor,
               new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor =
-            getDescriptor().getMessageTypes().get(78);
+            getDescriptor().getMessageTypes().get(80);
           internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor,
               new java.lang.String[] { "Snapshot", });
           internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor =
-            getDescriptor().getMessageTypes().get(79);
+            getDescriptor().getMessageTypes().get(81);
           internal_static_hbase_pb_IsSnapshotDoneResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor,
               new java.lang.String[] { "Done", "Snapshot", });
           internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor =
-            getDescriptor().getMessageTypes().get(80);
+            getDescriptor().getMessageTypes().get(82);
           internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor,
               new java.lang.String[] { "Snapshot", });
           internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor =
-            getDescriptor().getMessageTypes().get(81);
+            getDescriptor().getMessageTypes().get(83);
           internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor,
               new java.lang.String[] { "Done", });
           internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor =
-            getDescriptor().getMessageTypes().get(82);
+            getDescriptor().getMessageTypes().get(84);
           internal_static_hbase_pb_GetSchemaAlterStatusRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor,
               new java.lang.String[] { "TableName", });
           internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor =
-            getDescriptor().getMessageTypes().get(83);
+            getDescriptor().getMessageTypes().get(85);
           internal_static_hbase_pb_GetSchemaAlterStatusResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor,
               new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", });
           internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor =
-            getDescriptor().getMessageTypes().get(84);
+            getDescriptor().getMessageTypes().get(86);
           internal_static_hbase_pb_GetTableDescriptorsRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor,
               new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", });
           internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor =
-            getDescriptor().getMessageTypes().get(85);
+            getDescriptor().getMessageTypes().get(87);
           internal_static_hbase_pb_GetTableDescriptorsResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor,
               new java.lang.String[] { "TableSchema", });
           internal_static_hbase_pb_GetTableNamesRequest_descriptor =
-            getDescriptor().getMessageTypes().get(86);
+            getDescriptor().getMessageTypes().get(88);
           internal_static_hbase_pb_GetTableNamesRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableNamesRequest_descriptor,
               new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", });
           internal_static_hbase_pb_GetTableNamesResponse_descriptor =
-            getDescriptor().getMessageTypes().get(87);
+            getDescriptor().getMessageTypes().get(89);
           internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableNamesResponse_descriptor,
               new java.lang.String[] { "TableNames", });
           internal_static_hbase_pb_GetTableStateRequest_descriptor =
-            getDescriptor().getMessageTypes().get(88);
+            getDescriptor().getMessageTypes().get(90);
           internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableStateRequest_descriptor,
               new java.lang.String[] { "TableName", });
           internal_static_hbase_pb_GetTableStateResponse_descriptor =
-            getDescriptor().getMessageTypes().get(89);
+            getDescriptor().getMessageTypes().get(91);
           internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetTableStateResponse_descriptor,
               new java.lang.String[] { "TableState", });
           internal_static_hbase_pb_GetClusterStatusRequest_descriptor =
-            getDescriptor().getMessageTypes().get(90);
+            getDescriptor().getMessageTypes().get(92);
           internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterStatusRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetClusterStatusResponse_descriptor =
-            getDescriptor().getMessageTypes().get(91);
+            getDescriptor().getMessageTypes().get(93);
           internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetClusterStatusResponse_descriptor,
               new java.lang.String[] { "ClusterStatus", });
           internal_static_hbase_pb_IsMasterRunningRequest_descriptor =
-            getDescriptor().getMessageTypes().get(92);
+            getDescriptor().getMessageTypes().get(94);
           internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsMasterRunningRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_IsMasterRunningResponse_descriptor =
-            getDescriptor().getMessageTypes().get(93);
+            getDescriptor().getMessageTypes().get(95);
           internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsMasterRunningResponse_descriptor,
               new java.lang.String[] { "IsMasterRunning", });
           internal_static_hbase_pb_ExecProcedureRequest_descriptor =
-            getDescriptor().getMessageTypes().get(94);
+            getDescriptor().getMessageTypes().get(96);
           internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ExecProcedureRequest_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_ExecProcedureResponse_descriptor =
-            getDescriptor().getMessageTypes().get(95);
+            getDescriptor().getMessageTypes().get(97);
           internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ExecProcedureResponse_descriptor,
               new java.lang.String[] { "ExpectedTimeout", "ReturnData", });
           internal_static_hbase_pb_IsProcedureDoneRequest_descriptor =
-            getDescriptor().getMessageTypes().get(96);
+            getDescriptor().getMessageTypes().get(98);
           internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsProcedureDoneRequest_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_IsProcedureDoneResponse_descriptor =
-            getDescriptor().getMessageTypes().get(97);
+            getDescriptor().getMessageTypes().get(99);
           internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_IsProcedureDoneResponse_descriptor,
               new java.lang.String[] { "Done", "Snapshot", });
           internal_static_hbase_pb_GetProcedureResultRequest_descriptor =
-            getDescriptor().getMessageTypes().get(98);
+            getDescriptor().getMessageTypes().get(100);
           internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetProcedureResultRequest_descriptor,
               new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_GetProcedureResultResponse_descriptor =
-            getDescriptor().getMessageTypes().get(99);
+            getDescriptor().getMessageTypes().get(101);
           internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetProcedureResultResponse_descriptor,
               new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", });
           internal_static_hbase_pb_AbortProcedureRequest_descriptor =
-            getDescriptor().getMessageTypes().get(100);
+            getDescriptor().getMessageTypes().get(102);
           internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_AbortProcedureRequest_descriptor,
               new java.lang.String[] { "ProcId", "MayInterruptIfRunning", });
           internal_static_hbase_pb_AbortProcedureResponse_descriptor =
-            getDescriptor().getMessageTypes().get(101);
+            getDescriptor().getMessageTypes().get(103);
           internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_AbortProcedureResponse_descriptor,
               new java.lang.String[] { "IsProcedureAborted", });
           internal_static_hbase_pb_ListProceduresRequest_descriptor =
-            getDescriptor().getMessageTypes().get(102);
+            getDescriptor().getMessageTypes().get(104);
           internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ListProceduresRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_ListProceduresResponse_descriptor =
-            getDescriptor().getMessageTypes().get(103);
+            getDescriptor().getMessageTypes().get(105);
           internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ListProceduresResponse_descriptor,
               new java.lang.String[] { "Procedure", });
           internal_static_hbase_pb_SetQuotaRequest_descriptor =
-            getDescriptor().getMessageTypes().get(104);
+            getDescriptor().getMessageTypes().get(106);
           internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetQuotaRequest_descriptor,
               new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
           internal_static_hbase_pb_SetQuotaResponse_descriptor =
-            getDescriptor().getMessageTypes().get(105);
+            getDescriptor().getMessageTypes().get(107);
           internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SetQuotaResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor =
-            getDescriptor().getMessageTypes().get(106);
+            getDescriptor().getMessageTypes().get(108);
           internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor,
               new java.lang.String[] { "TableName", });
           internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor =
-            getDescriptor().getMessageTypes().get(107);
+            getDescriptor().getMessageTypes().get(109);
           internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor,
               new java.lang.String[] { "Region", });
           internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor =
-            getDescriptor().getMessageTypes().get(108);
+            getDescriptor().getMessageTypes().get(110);
           internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor,
               new java.lang.String[] { "CompactionTimestamp", });
           internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor =
-            getDescriptor().getMessageTypes().get(109);
+            getDescriptor().getMessageTypes().get(111);
           internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor =
-            getDescriptor().getMessageTypes().get(110);
+            getDescriptor().getMessageTypes().get(112);
           internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor,
diff --git hbase-protocol/src/main/protobuf/Master.proto hbase-protocol/src/main/protobuf/Master.proto
index ad8111e..542a872 100644
--- hbase-protocol/src/main/protobuf/Master.proto
+++ hbase-protocol/src/main/protobuf/Master.proto
@@ -255,6 +255,13 @@ message StopMasterRequest {
 message StopMasterResponse {
 }
 
+message IsInMaintenanceModeRequest {
+}
+
+message IsInMaintenanceModeResponse {
+  required bool inMaintenanceMode = 1;
+}
+
 message BalanceRequest {
   optional bool force = 1;
 }
@@ -645,6 +652,12 @@ service MasterService {
     returns(StopMasterResponse);
 
   /**
+   * Query whether the Master is in maintenance mode.
+   */
+  rpc IsMasterInMaintenanceMode(IsInMaintenanceModeRequest)
+    returns(IsInMaintenanceModeResponse);
+
+  /**
    * Run the balancer.  Will run the balancer and if regions to move, it will
    * go ahead and do the reassignments.  Can NOT run for various reasons.
    * Check logs.
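
For context, here is a minimal client-side sketch of how the new RPC surfaces through the Admin API introduced by this change. It assumes a reachable cluster with the default configuration; the class name and printed messages are illustrative only, not part of the patch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MaintenanceModeCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Round-trips to the active master via the new IsMasterInMaintenanceMode RPC.
      if (admin.isMasterInMaintenanceMode()) {
        System.out.println("Master is in maintenance mode; balancer, region normalizer,"
            + " catalog janitor and split/merge are held off.");
      } else {
        System.out.println("Master is not in maintenance mode.");
      }
    }
  }
}
```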
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index c93b307..476c796 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -109,6 +109,7 @@ public class CatalogJanitor extends ScheduledChore {
     try {
       AssignmentManager am = this.services.getAssignmentManager();
       if (this.enabled.get()
+          && !this.services.isInMaintenanceMode()
           && am != null
           && am.isFailoverCleanupDone()
           && am.getRegionStates().getRegionsInTransition().size() == 0) {
@@ -241,6 +242,11 @@ public class CatalogJanitor extends ScheduledChore {
       int mergeCleaned = 0;
       Map<HRegionInfo, Result> mergedRegions = scanTriple.getSecond();
       for (Map.Entry<HRegionInfo, Result> e : mergedRegions.entrySet()) {
+        if (this.services.isInMaintenanceMode()) {
+          // Stop cleaning if the master is in maintenance mode
+          break;
+        }
+
         PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(e.getValue());
         HRegionInfo regionA = p.getFirst();
         HRegionInfo regionB = p.getSecond();
@@ -266,6 +272,11 @@ public class CatalogJanitor extends ScheduledChore {
       // regions whose parents are still around
       HashSet<String> parentNotCleaned = new HashSet<>();
       for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
+        if (this.services.isInMaintenanceMode()) {
+          // Stop cleaning if the master is in maintenance mode
+          break;
+        }
+
         if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
             cleanParent(e.getKey(), e.getValue())) {
           splitCleaned++;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 3803635..ee5d7fb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -24,7 +24,6 @@ import com.google.common.collect.Maps;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Service;
 
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -1268,6 +1267,12 @@ public class HMaster extends HRegionServer implements MasterServices {
       LOG.debug("Master has not been initialized, don't run balancer.");
       return false;
     }
+
+    if (isInMaintenanceMode()) {
+      LOG.info("Master is in maintenanceMode mode, don't run balancer.");
+      return false;
+    }
+
     // Do this call outside of synchronized block.
     int maximumBalanceTime = getBalancerCutoffTime();
     synchronized (this.balancer) {
@@ -1371,6 +1376,11 @@ public class HMaster extends HRegionServer implements MasterServices {
       return false;
     }
 
+    if (isInMaintenanceMode()) {
+      LOG.info("Master is in maintenance mode, don't run region normalizer.");
+      return false;
+    }
+
     if (!this.regionNormalizerTracker.isNormalizerOn()) {
       LOG.debug("Region normalization is disabled, don't run region normalizer.");
       return false;
@@ -1384,6 +1394,11 @@ public class HMaster extends HRegionServer implements MasterServices {
       Collections.shuffle(allEnabledTables);
 
       for (TableName table : allEnabledTables) {
+        if (isInMaintenanceMode()) {
+          LOG.debug("Master is in maintenance mode, stop running region normalizer.");
+          return false;
+        }
+
         HTableDescriptor tblDesc = getTableDescriptors().get(table);
         if (table.isSystemTable() || (tblDesc != null &&
             !tblDesc.isNormalizationEnabled())) {
@@ -2364,6 +2379,21 @@ public class HMaster extends HRegionServer implements MasterServices {
     return initialized.isReady();
   }
 
+  /**
+   * Report whether this master is in maintenance mode.
+   *
+   * @return true if the master is in maintenance mode
+   */
+  @Override
+  public boolean isInMaintenanceMode() {
+    try {
+      return ZKUtil.nodeHasChildren(zooKeeper, ZooKeeperWatcher.masterMaintZNode);
+    } catch (KeeperException e) {
+      // Ignore the ZooKeeper exception and assume the master is not in maintenance mode
+      return false;
+    }
+  }
+
   @VisibleForTesting
   public void setInitialized(boolean isInitialized) {
     procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
@@ -2855,7 +2885,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    * @return The state of the switch
    */
   public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
-    if (null == splitOrMergeTracker) {
+    if (null == splitOrMergeTracker || isInMaintenanceMode()) {
       return false;
     }
     return splitOrMergeTracker.isSplitOrMergeEnabled(switchType);
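
The check above treats the master as being in maintenance mode whenever ZooKeeperWatcher.masterMaintZNode has at least one child; the diff does not show how that child is created (presumably by an exclusive hbck run, if hbckZNode lives under the maintenance znode). Purely as an illustration of that contract, the sketch below flips the flag by hand. The child name "maintenance-sketch" is invented here, ZKUtil.joinZNode is assumed to be available in this code line, and the parent znode is assumed to already exist.

```java
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public final class MaintenanceModeToggle {
  private MaintenanceModeToggle() {
  }

  /** Enter maintenance mode by adding an ephemeral child under the maintenance znode. */
  public static boolean enter(ZooKeeperWatcher zkw) throws KeeperException {
    String child = ZKUtil.joinZNode(ZooKeeperWatcher.masterMaintZNode, "maintenance-sketch");
    // The ephemeral node goes away with the ZK session, so a crashed caller cannot
    // leave the master stuck in maintenance mode.
    return ZKUtil.createEphemeralNodeAndWatch(zkw, child, null);
  }

  /** Leave maintenance mode by removing the child created above. */
  public static void leave(ZooKeeperWatcher zkw) throws KeeperException {
    String child = ZKUtil.joinZNode(ZooKeeperWatcher.masterMaintZNode, "maintenance-sketch");
    ZKUtil.deleteNode(zkw, child);
  }
}
```

With such a helper in place, HMaster#isInMaintenanceMode() (and the new Admin#isMasterInMaintenanceMode() call) should report true until leave() is invoked or the session expires.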
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 2f65e97..312648d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1271,6 +1271,15 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+      final RpcController controller,
+      final IsInMaintenanceModeRequest request) throws ServiceException {
+    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
+    response.setInMaintenanceMode(master.isInMaintenanceMode());
+    return response.build();
+  }
+
+  @Override
   public UnassignRegionResponse unassignRegion(RpcController controller,
       UnassignRegionRequest req) throws ServiceException {
     try {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 21f14e8..f4c9603 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -303,6 +303,11 @@ public interface MasterServices extends Server {
   boolean isInitialized();
 
   /**
+   * @return true if the master is in maintenance mode
+   */
+  boolean isInMaintenanceMode();
+
+  /**
    * Abort a procedure.
    * @param procId ID of the procedure
    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
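
To illustrate how a consumer of the new MasterServices method is expected to behave (mirroring the CatalogJanitor change above), here is a hedged sketch of a master-side chore that stands down while the master is in maintenance mode. The class name and constructor arguments are hypothetical; only the isInMaintenanceMode() call comes from this patch.

```java
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.master.MasterServices;

/** Sketch of a chore that skips its periodic work while the master is in maintenance mode. */
public class MaintenanceAwareChore extends ScheduledChore {
  private final MasterServices services;

  public MaintenanceAwareChore(MasterServices services, int periodMillis) {
    // MasterServices extends Server, which is a Stoppable, so it can act as the stopper.
    super("MaintenanceAwareChore", services, periodMillis);
    this.services = services;
  }

  @Override
  protected void chore() {
    if (services.isInMaintenanceMode()) {
      // Mirror CatalogJanitor: do nothing while an operator (or hbck) owns the cluster.
      return;
    }
    // ... normal periodic work would go here ...
  }
}
```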
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index ea91d90..e1a0f85 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
@@ -33,7 +32,6 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
-import java.net.InetAddress;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -56,13 +54,8 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.io.IOUtils;
@@ -72,12 +65,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -104,7 +95,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
@@ -134,11 +124,8 @@ import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
@@ -200,15 +187,9 @@ public class HBaseFsck extends Configured implements Closeable {
   private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;
   private static final int DEFAULT_MAX_MERGE = 5;
   private static final String TO_BE_LOADED = "to_be_loaded";
-  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";
-  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;
-  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds
-  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds
-  // We have to set the timeout value > HdfsConstants.LEASE_SOFTLIMIT_PERIOD.
-  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for
-  // AlreadyBeingCreatedException which is implies timeout on this operations up to
-  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).
-  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds
+  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;
+  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds
+  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds
 
   /**********************
    * Internal resources
@@ -223,12 +204,6 @@ public class HBaseFsck extends Configured implements Closeable {
   private long startMillis = EnvironmentEdgeManager.currentTime();
   private HFileCorruptionChecker hfcc;
   private int retcode = 0;
-  private Path HBCK_LOCK_PATH;
-  private FSDataOutputStream hbckOutFd;
-  // This lock is to prevent cleanup of balancer resources twice between
-  // ShutdownHook and the main code. We cleanup only if the connect() is
-  // successful
-  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);
 
   /***********
    * Options
@@ -236,8 +211,6 @@ public class HBaseFsck extends Configured implements Closeable {
   private static boolean details = false; // do we display the full report
   private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older
   private static boolean forceExclusive = false; // only this hbck can modify HBase
-  private static boolean disableBalancer = false; // disable load balancer to keep regions stable
-  private static boolean disableSplitAndMerge = false; // disable split and merge
   private boolean fixAssignments = false; // fix assignment errors?
   private boolean fixMeta = false; // fix meta errors?
   private boolean checkHdfs = true; // load and check fs consistency?
@@ -305,11 +278,11 @@ public class HBaseFsck extends Configured implements Closeable {
       new HashMap>();
   private Map tableStates =
       new HashMap();
-  private final RetryCounterFactory lockFileRetryCounterFactory;
-
+  private final RetryCounterFactory createZNodeRetryCounterFactory;
   private Map> skippedRegions = new HashMap>();
 
-  ZooKeeperWatcher zkw = null;
+  private ZooKeeperWatcher zkw = null;
+  private boolean hbckZnodeCreated = false;
   /**
    * Constructor
    *
@@ -342,130 +315,63 @@ public class HBaseFsck extends Configured implements Closeable {
     super(conf);
     errors = getErrorReporter(getConf());
     this.executor = exec;
-    lockFileRetryCounterFactory = new RetryCounterFactory(
-      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),
+    createZNodeRetryCounterFactory = new RetryCounterFactory(
+      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),
       getConf().getInt(
-        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),
+        "hbase.hbck.createznode.attempt.sleep.interval",
+        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),
       getConf().getInt(
-        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));
+        "hbase.hbck.createznode.attempt.maxsleeptime",
+        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));
     zkw = createZooKeeperWatcher();
   }
 
-  private class FileLockCallable implements Callable<FSDataOutputStream> {
-    RetryCounter retryCounter;
-
-    public FileLockCallable(RetryCounter retryCounter) {
-      this.retryCounter = retryCounter;
-    }
-    @Override
-    public FSDataOutputStream call() throws IOException {
+  /**
+   * This method maintains the hbck ephemeral znode. If the znode cannot be created (for
+   * example, because another hbck instance already holds it) we return false; repeated
+   * ZooKeeper failures are rethrown as an IOException.
+   * @return true if creating the znode succeeds; false otherwise
+   * @throws IOException if an IO failure occurs
+   */
+  private boolean checkAndMarkRunningHbck() throws IOException {
+    RetryCounter retryCounter = createZNodeRetryCounterFactory.create();
+    do {
       try {
-        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
-        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
-            HConstants.DATA_FILE_UMASK_KEY);
-        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
-        fs.mkdirs(tmpDir);
-        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
-        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
-        out.writeBytes(InetAddress.getLocalHost().toString());
-        out.flush();
-        return out;
-      } catch(RemoteException e) {
-        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
-          return null;
-        } else {
-          throw e;
+        hbckZnodeCreated = ZKUtil.createEphemeralNodeAndWatch(zkw, ZooKeeperWatcher.hbckZNode, null);
+        if (hbckZnodeCreated) {
+          break;
         }
+      } catch (KeeperException e) {
+        if (retryCounter.getAttemptTimes() >= retryCounter.getMaxAttempts()) {
+           throw new IOException("Can't create znode " + ZooKeeperWatcher.hbckZNode, e);
+        }
+        // fall through and retry
       }
-    }
 
-    private FSDataOutputStream createFileWithRetries(final FileSystem fs,
-        final Path hbckLockFilePath, final FsPermission defaultPerms)
-        throws IOException {
+      LOG.warn("Fail to create znode " + ZooKeeperWatcher.hbckZNode + ", try=" +
+          (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts());
 
-      IOException exception = null;
-      do {
-        try {
-          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);
-        } catch (IOException ioe) {
-          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()
-              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "
-              + retryCounter.getMaxAttempts());
-          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),
-              ioe);
-          try {
-            exception = ioe;
-            retryCounter.sleepUntilNextRetry();
-          } catch (InterruptedException ie) {
-            throw (InterruptedIOException) new InterruptedIOException(
-                "Can't create lock file " + hbckLockFilePath.getName())
-            .initCause(ie);
-          }
-        }
-      } while (retryCounter.shouldRetry());
-
-      throw exception;
-    }
+      try {
+        retryCounter.sleepUntilNextRetry();
+      } catch (InterruptedException ie) {
+        throw (InterruptedIOException) new InterruptedIOException(
+              "Can't create znode " + ZooKeeperWatcher.hbckZNode).initCause(ie);
+      }
+    } while (retryCounter.shouldRetry());
+    return hbckZnodeCreated;
   }
 
-  /**
-   * This method maintains a lock using a file. If the creation fails we return null
-   *
-   * @return FSDataOutputStream object corresponding to the newly opened lock file
-   * @throws IOException if IO failure occurs
-   */
-  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {
-    RetryCounter retryCounter = lockFileRetryCounterFactory.create();
-    FileLockCallable callable = new FileLockCallable(retryCounter);
-    ExecutorService executor = Executors.newFixedThreadPool(1);
-    FutureTask<FSDataOutputStream> futureTask = new FutureTask<FSDataOutputStream>(callable);
-    executor.execute(futureTask);
-    final int timeoutInSeconds = getConf().getInt(
-      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);
-    FSDataOutputStream stream = null;
+  private void cleanupHbckZnode() {
     try {
-      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);
-    } catch (ExecutionException ee) {
-      LOG.warn("Encountered exception when opening lock file", ee);
-    } catch (InterruptedException ie) {
-      LOG.warn("Interrupted when opening lock file", ie);
-      Thread.currentThread().interrupt();
-    } catch (TimeoutException exception) {
-      // took too long to obtain lock
-      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");
-      futureTask.cancel(true);
-    } finally {
-      executor.shutdownNow();
-    }
-    return stream;
-  }
-
-  private void unlockHbck() {
-    if (isExclusive() && hbckLockCleanup.compareAndSet(true, false)) {
-      RetryCounter retryCounter = lockFileRetryCounterFactory.create();
-      do {
-        try {
-          IOUtils.closeQuietly(hbckOutFd);
-          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),
-              HBCK_LOCK_PATH, true);
-          LOG.info("Finishing hbck");
-          return;
-        } catch (IOException ioe) {
-          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="
-              + (retryCounter.getAttemptTimes() + 1) + " of "
-              + retryCounter.getMaxAttempts());
-          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);
-          try {
-            retryCounter.sleepUntilNextRetry();
-          } catch (InterruptedException ie) {
-            Thread.currentThread().interrupt();
-            LOG.warn("Interrupted while deleting lock file" +
-                HBCK_LOCK_PATH);
-            return;
-          }
-        }
-      } while (retryCounter.shouldRetry());
+      if (zkw != null && hbckZodeCreated) {
+        ZKUtil.deleteNode(zkw, ZooKeeperWatcher.hbckZNode);
+        hbckZodeCreated = false;
+      }
+    } catch (KeeperException e) {
+      // Ignore
+      LOG.warn("Delete HBCK znode " + ZooKeeperWatcher.hbckZNode + " failed ", e);
     }
+    LOG.info("Finishing hbck");
   }
 
   /**
@@ -473,31 +379,25 @@ public class HBaseFsck extends Configured implements Closeable {
    * online state.
    */
   public void connect() throws IOException {
-
     if (isExclusive()) {
-      // Grab the lock
-      hbckOutFd = checkAndMarkRunningHbck();
-      if (hbckOutFd == null) {
+      boolean success = checkAndMarkRunningHbck();
+      if (!success) {
         setRetCode(-1);
         LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +
-            "[If you are sure no other instance is running, delete the lock file " +
-            HBCK_LOCK_PATH + " and rerun the tool]");
+            "[If you are sure no other instance is running, delete the znode " +
+            ZooKeeperWatcher.hbckZNode + " and rerun the tool]");
         throw new IOException("Duplicate hbck - Abort");
       }
-
-      // Make sure to cleanup the lock
-      hbckLockCleanup.set(true);
     }
 
-
     // Add a shutdown hook to this thread, in case user tries to
-    // kill the hbck with a ctrl-c, we want to cleanup the lock so that
+    // kill the hbck with a ctrl-c, we want to cleanup the znode so that
     // it is available for further calls
     Runtime.getRuntime().addShutdownHook(new Thread() {
       @Override
       public void run() {
         IOUtils.closeQuietly(HBaseFsck.this);
-        unlockHbck();
+        cleanupHbckZnode();
       }
     });
 
@@ -684,34 +584,7 @@ public class HBaseFsck extends Configured implements Closeable {
     errors.print("Version: " + status.getHBaseVersion());
     offlineHdfsIntegrityRepair();
 
-    boolean oldBalancer = false;
-    if (shouldDisableBalancer()) {
-      oldBalancer = admin.setBalancerRunning(false, true);
-    }
-    boolean[] oldSplitAndMerge = null;
-    if (shouldDisableSplitAndMerge()) {
-      admin.releaseSplitOrMergeLockAndRollback();
-      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false,
-        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    }
-
-    try {
-      onlineConsistencyRepair();
-    }
-    finally {
-      // Only restore the balancer if it was true when we started repairing and
-      // we actually disabled it. Otherwise, we might clobber another run of
-      // hbck that has just restored it.
-      if (shouldDisableBalancer() && oldBalancer) {
-        admin.setBalancerRunning(oldBalancer, false);
-      }
-
-      if (shouldDisableSplitAndMerge()) {
-        if (oldSplitAndMerge != null) {
-          admin.releaseSplitOrMergeLockAndRollback();
-        }
-      }
-    }
+    onlineConsistencyRepair();
 
     if (checkRegionBoundaries) {
       checkRegionBoundaries();
@@ -723,8 +596,8 @@ public class HBaseFsck extends Configured implements Closeable {
 
     checkAndFixReplication();
 
-    // Remove the hbck lock
-    unlockHbck();
+    // Remove the hbck znode
+    cleanupHbckZnode();
 
     // Print table summary
     printTableSummary(tablesInfo);
@@ -743,7 +616,7 @@ public class HBaseFsck extends Configured implements Closeable {
   @Override
   public void close() throws IOException {
     try {
-      unlockHbck();
+      cleanupHbckZnode();
     } catch (Exception io) {
       LOG.warn(io);
     } finally {
@@ -1789,11 +1662,6 @@ public class HBaseFsck extends Configured implements Closeable {
     });
   }
 
-  private ServerName getMetaRegionServerName(int replicaId)
-  throws IOException, KeeperException {
-    return new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
-  }
-
   /**
    * Contacts each regionserver and fetches metadata about regions.
    * @param regionServerList - the list of region servers to connect to
@@ -4179,43 +4047,6 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   /**
-   * Disable the load balancer.
-   */
-  public static void setDisableBalancer() {
-    disableBalancer = true;
-  }
-
-  /**
-   * Disable the split and merge
-   */
-  public static void setDisableSplitAndMerge() {
-    setDisableSplitAndMerge(true);
-  }
-
-  @VisibleForTesting
-  public static void setDisableSplitAndMerge(boolean flag) {
-    disableSplitAndMerge = flag;
-  }
-
-  /**
-   * The balancer should be disabled if we are modifying HBase.
-   * It can be disabled if you want to prevent region movement from causing
-   * false positives.
-   */
-  public boolean shouldDisableBalancer() {
-    return fixAny || disableBalancer;
-  }
-
-  /**
-   * The split and merge should be disabled if we are modifying HBase.
-   * It can be disabled if you want to prevent region movement from causing
-   * false positives.
-   */
-  public boolean shouldDisableSplitAndMerge() {
-    return fixAny || disableSplitAndMerge;
-  }
-
-  /**
    * Set summary mode.
    * Print only summary of the tables and status (OK or INCONSISTENT)
    */
@@ -4572,10 +4403,6 @@ public class HBaseFsck extends Configured implements Closeable {
         setDisplayFullReport();
       } else if (cmd.equals("-exclusive")) {
         setForceExclusive();
-      } else if (cmd.equals("-disableBalancer")) {
-        setDisableBalancer();
-      }  else if (cmd.equals("-disableSplitAndMerge")) {
-        setDisableSplitAndMerge();
       } else if (cmd.equals("-timelag")) {
         if (i == args.length - 1) {
           errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index b975c43..01206d4 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -231,6 +231,4 @@ public class SplitOrMergeTracker {
       return builder.build();
     }
   }
-
-
 }
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 60b62e4..46b0319 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -297,6 +297,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
+  public boolean isInMaintenanceMode() {
+    return false;
+  }
+
+  @Override
   public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
     return 0;
   }
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index 84ef6da..d8b01f0 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -63,7 +62,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -72,16 +70,12 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
@@ -95,8 +89,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*;
 import static org.junit.Assert.*;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
 
 @Category({MiscTests.class, LargeTests.class})
 public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@@ -484,10 +476,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       @Override
       public HBaseFsck call(){
         Configuration c = new Configuration(conf);
-        c.setInt("hbase.hbck.lockfile.attempts", 1);
-        // HBASE-13574 found that in HADOOP-2.6 and later, the create file would internally retry.
-        // To avoid flakiness of the test, set low max wait time.
-        c.setInt("hbase.hbck.lockfile.maxwaittime", 3);
+        c.setInt("hbase.hbck.createznode.attempts", 1);
         try{
           return doFsck(c, true); // Exclusive hbck only when fixing
         } catch(Exception e){
@@ -532,25 +521,20 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     // With the ExponentialBackoffPolicyWithLimit (starting with 200 milliseconds sleep time, and
     // max sleep time of 5 seconds), we can retry around 15 times within 80 seconds before bail out.
     //
-    // Note: the reason to use 80 seconds is that in HADOOP-2.6 and later, the create file would
-    // retry up to HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).  See HBASE-13574 for more
-    // details.
     final int timeoutInSeconds = 80;
     final int sleepIntervalInMilliseconds = 200;
-    final int maxSleepTimeInMilliseconds = 6000;
+    final int maxSleepTimeInMilliseconds = 5000;
     final int maxRetryAttempts = 15;
 
     class RunHbck implements Callable<HBaseFsck>{
-
       @Override
       public HBaseFsck call() throws Exception {
         // Increase retry attempts to make sure the non-active hbck doesn't get starved
         Configuration c = new Configuration(conf);
-        c.setInt("hbase.hbck.lockfile.maxwaittime", timeoutInSeconds);
-        c.setInt("hbase.hbck.lockfile.attempt.sleep.interval", sleepIntervalInMilliseconds);
-        c.setInt("hbase.hbck.lockfile.attempt.maxsleeptime", maxSleepTimeInMilliseconds);
-        c.setInt("hbase.hbck.lockfile.attempts", maxRetryAttempts);
-        return doFsck(c, false);
+        c.setInt("hbase.hbck.createznode.attempt.sleep.interval", sleepIntervalInMilliseconds);
+        c.setInt("hbase.hbck.createznode.attempt.maxsleeptime", maxSleepTimeInMilliseconds);
+        c.setInt("hbase.hbck.createznode.attempts", maxRetryAttempts);
+        return doFsck(c, true); // Exclusive hbck only when fixing
       }
     }
 
@@ -567,7 +551,6 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     assertNotNull(h2);
     assert(h1.getRetCode() >= 0);
     assert(h2.getRetCode() >= 0);
-
   }
 
   @Test (timeout = 180000)
@@ -1850,53 +1833,4 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
     hbck.close();
   }
-
-  /**
-   *  See HBASE-15406
-   * */
-  @Test
-  public void testSplitOrMergeStatWhenHBCKAbort() throws Exception {
-    admin.setSplitOrMergeEnabled(true, false, true,
-      MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    boolean oldSplit = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean oldMerge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
-    assertTrue(oldSplit);
-    assertTrue(oldMerge);
-
-    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
-    HBaseFsck hbck = new HBaseFsck(conf, exec);
-    HBaseFsck.setDisplayFullReport(); // i.e. -details
-    final HBaseFsck spiedHbck = spy(hbck);
-    doAnswer(new Answer() {
-      @Override
-      public Object answer(InvocationOnMock invocation) throws Throwable {
-        // we close splitOrMerge flag in hbck, so in finally hbck will not set splitOrMerge back.
-        spiedHbck.setDisableSplitAndMerge(false);
-        return null;
-      }
-    }).when(spiedHbck).onlineConsistencyRepair();
-    spiedHbck.setDisableSplitAndMerge();
-    spiedHbck.connect();
-    spiedHbck.onlineHbck();
-    spiedHbck.close();
-
-    boolean split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-    assertFalse(split);
-    assertFalse(merge);
-
-    // rerun hbck to repair the switches state
-    hbck = new HBaseFsck(conf, exec);
-    hbck.setDisableSplitAndMerge();
-    hbck.connect();
-    hbck.onlineHbck();
-    hbck.close();
-
-    split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
-    assertTrue(split);
-    assertTrue(merge);
-  }
 }