diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index cfed9f6..adbc7f9 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1312,13 +1312,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( - RpcController controller, MasterProtos.DispatchMergingRegionsRequest request) - throws ServiceException { - return stub.dispatchMergingRegions(controller, request); - } - - @Override public MasterProtos.MergeTableRegionsResponse mergeTableRegions( RpcController controller, MasterProtos.MergeTableRegionsRequest request) throws ServiceException { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 585a5f8..d862d5f 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -121,7 +121,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; @@ -1950,46 +1949,6 @@ public final class ProtobufUtil { } } - /** - * A helper to merge regions using admin protocol. Send request to - * regionserver. 
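A minimal sketch, not part of this patch: with the region-server-side merge path removed above, clients merge regions through the master's MergeTableRegions RPC, which this change keeps (see the surviving mergeTableRegions stub above and buildMergeTableRegionsRequest context below). The wrapper name and the trailing nonce parameters here are assumptions inferred from that surviving context.

    // Sketch only -- not part of this patch. The wrapper name and the nonce
    // parameters are assumptions; the builder itself survives in
    // RequestConverter (see the context further down in this diff).
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

    final class MergeViaMasterSketch {
      static MasterProtos.MergeTableRegionsResponse merge(
          MasterProtos.MasterService.BlockingInterface master, RpcController controller,
          byte[] encodedRegionA, byte[] encodedRegionB,
          boolean forcible, long nonceGroup, long nonce) throws Exception {
        // Both regions travel in a single request; the master drives the merge
        // itself instead of the old close-then-merge RPC on the region server.
        MasterProtos.MergeTableRegionsRequest request =
            RequestConverter.buildMergeTableRegionsRequest(
                new byte[][] { encodedRegionA, encodedRegionB },
                forcible, nonceGroup, nonce);
        return master.mergeTableRegions(controller, request);
      }
    }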
- * @param admin - * @param region_a - * @param region_b - * @param forcible true if do a compulsory merge, otherwise we will only merge - * two adjacent regions - * @param user effective user - * @throws IOException - */ - public static void mergeRegions(final RpcController controller, - final AdminService.BlockingInterface admin, - final HRegionInfo region_a, final HRegionInfo region_b, - final boolean forcible, final User user) throws IOException { - final MergeRegionsRequest request = ProtobufUtil.buildMergeRegionsRequest( - region_a.getRegionName(), region_b.getRegionName(),forcible); - if (user != null) { - try { - user.runAs(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - admin.mergeRegions(controller, request); - return null; - } - }); - } catch (InterruptedException ie) { - InterruptedIOException iioe = new InterruptedIOException(); - iioe.initCause(ie); - throw iioe; - } - } else { - try { - admin.mergeRegions(controller, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - } - // End helpers for Admin /* @@ -3225,28 +3184,6 @@ public final class ProtobufUtil { } /** - * Create a MergeRegionsRequest for the given regions - * @param regionA name of region a - * @param regionB name of region b - * @param forcible true if it is a compulsory merge - * @return a MergeRegionsRequest - */ - public static MergeRegionsRequest buildMergeRegionsRequest( - final byte[] regionA, final byte[] regionB, final boolean forcible) { - MergeRegionsRequest.Builder builder = MergeRegionsRequest.newBuilder(); - RegionSpecifier regionASpecifier = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionA); - RegionSpecifier regionBSpecifier = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionB); - builder.setRegionA(regionASpecifier); - builder.setRegionB(regionBSpecifier); - builder.setForcible(forcible); - // send the master's wall clock time as well, so that the RS can refer to it - builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime()); - return builder.build(); - } - - /** * Get a ServerName from the passed in data bytes. * @param data Data with a serialize server name in it; can handle the old style * servername where servername was host and port. 
Works too with data that diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 4acb525..fd08d98 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTabl import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; @@ -1089,23 +1088,6 @@ public final class RequestConverter { return builder.build(); } - public static DispatchMergingRegionsRequest buildDispatchMergingRegionsRequest( - final byte[] encodedNameOfRegionA, - final byte[] encodedNameOfRegionB, - final boolean forcible, - final long nonceGroup, - final long nonce) throws DeserializationException { - DispatchMergingRegionsRequest.Builder builder = DispatchMergingRegionsRequest.newBuilder(); - builder.setRegionA(buildRegionSpecifier( - RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfRegionA)); - builder.setRegionB(buildRegionSpecifier( - RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfRegionB)); - builder.setForcible(forcible); - builder.setNonceGroup(nonceGroup); - builder.setNonce(nonce); - return builder.build(); - } - public static MergeTableRegionsRequest buildMergeTableRegionsRequest( final byte[][] encodedNameOfdaughaterRegions, final boolean forcible, diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java index 654d152..b4bd84d 100644 --- hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java +++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java @@ -16538,1406 +16538,6 @@ public final class AdminProtos { } - public interface MergeRegionsRequestOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.MergeRegionsRequest) - org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { - - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - boolean hasRegionA(); - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA(); - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder(); - - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - boolean hasRegionB(); - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB(); - /** - * required 
.hbase.pb.RegionSpecifier region_b = 2; - */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder(); - - /** - * optional bool forcible = 3 [default = false]; - */ - boolean hasForcible(); - /** - * optional bool forcible = 3 [default = false]; - */ - boolean getForcible(); - - /** - *
-     * <pre>
-     * wall clock time from master
-     * </pre>
-     *
-     * <code>optional uint64 master_system_time = 4;</code>
-     */
-    boolean hasMasterSystemTime();
-    /**
-     * <pre>
-     * wall clock time from master
-     * </pre>
-     *
-     * <code>optional uint64 master_system_time = 4;</code>
-     */
-    long getMasterSystemTime();
-  }
-  /**
-   * <pre>
-   **
-   * Merges the specified regions.
-   * <p>
-   * This method currently closes the regions and then merges them
-   * </pre>
- * - * Protobuf type {@code hbase.pb.MergeRegionsRequest} - */ - public static final class MergeRegionsRequest extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.MergeRegionsRequest) - MergeRegionsRequestOrBuilder { - // Use MergeRegionsRequest.newBuilder() to construct. - private MergeRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private MergeRegionsRequest() { - forcible_ = false; - masterSystemTime_ = 0L; - } - - @java.lang.Override - public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private MergeRegionsRequest( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - this(); - int mutable_bitField0_ = 0; - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = regionA_.toBuilder(); - } - regionA_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(regionA_); - regionA_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } - case 18: { - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = regionB_.toBuilder(); - } - regionB_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(regionB_); - regionB_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000002; - break; - } - case 24: { - bitField0_ |= 0x00000004; - forcible_ = input.readBool(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - masterSystemTime_ = input.readUInt64(); - break; - } - } - } - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder.class); - } - - private int bitField0_; - public static final int REGION_A_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_; - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public boolean hasRegionA() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() { - return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() { - return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; - } - - public static final int REGION_B_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_; - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public boolean hasRegionB() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() { - return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() { - return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; - } - - public static final int FORCIBLE_FIELD_NUMBER = 3; - private boolean forcible_; - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean hasForcible() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean getForcible() { - return forcible_; - } - - public static final int MASTER_SYSTEM_TIME_FIELD_NUMBER = 4; - private long masterSystemTime_; - /** - *
-     * <pre>
-     * wall clock time from master
-     * </pre>
-     *
-     * <code>optional uint64 master_system_time = 4;</code>
-     */
-    public boolean hasMasterSystemTime() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
-    }
-    /**
-     * <pre>
-     * wall clock time from master
-     * </pre>
- * - * optional uint64 master_system_time = 4; - */ - public long getMasterSystemTime() { - return masterSystemTime_; - } - - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - if (!hasRegionA()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRegionB()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegionA().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegionB().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, getRegionA()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, getRegionB()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(3, forcible_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(4, masterSystemTime_); - } - unknownFields.writeTo(output); - } - - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getRegionA()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getRegionB()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeBoolSize(3, forcible_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, masterSystemTime_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest) obj; - - boolean result = true; - result = result && (hasRegionA() == other.hasRegionA()); - if (hasRegionA()) { - result = result && getRegionA() - .equals(other.getRegionA()); - } - result = result && (hasRegionB() == other.hasRegionB()); - if (hasRegionB()) { - result = result && getRegionB() - .equals(other.getRegionB()); - } - result = result && (hasForcible() == other.hasForcible()); - if (hasForcible()) { - result = result && (getForcible() - == other.getForcible()); - } - result = result && (hasMasterSystemTime() == other.hasMasterSystemTime()); - if (hasMasterSystemTime()) { - result = result && (getMasterSystemTime() - == other.getMasterSystemTime()); - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (hasRegionA()) { - hash = (37 * hash) + REGION_A_FIELD_NUMBER; - hash = (53 * hash) + getRegionA().hashCode(); - } - if (hasRegionB()) { - hash = (37 * hash) + REGION_B_FIELD_NUMBER; - hash = (53 * hash) + getRegionB().hashCode(); - } - if (hasForcible()) { - hash = (37 * hash) + FORCIBLE_FIELD_NUMBER; - hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( - getForcible()); - } - if (hasMasterSystemTime()) { - hash = (37 * hash) + MASTER_SYSTEM_TIME_FIELD_NUMBER; - hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( - getMasterSystemTime()); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(byte[] data) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom( - byte[] data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseDelimitedFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public 
static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - *
-     * <pre>
-     **
-     * Merges the specified regions.
-     * <p>
-     * This method currently closes the regions and then merges them
-     * </pre>
- * - * Protobuf type {@code hbase.pb.MergeRegionsRequest} - */ - public static final class Builder extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:hbase.pb.MergeRegionsRequest) - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequestOrBuilder { - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - getRegionAFieldBuilder(); - getRegionBFieldBuilder(); - } - } - public Builder clear() { - super.clear(); - if (regionABuilder_ == null) { - regionA_ = null; - } else { - regionABuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (regionBBuilder_ == null) { - regionB_ = null; - } else { - regionBBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - forcible_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - masterSystemTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_descriptor; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest build() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest buildPartial() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (regionABuilder_ == null) { - result.regionA_ = regionA_; - } else { - result.regionA_ = regionABuilder_.build(); - } - if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (regionBBuilder_ == null) { - result.regionB_ = regionB_; - } else { - result.regionB_ = regionBBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.forcible_ = forcible_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.masterSystemTime_ = masterSystemTime_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder clone() { - return (Builder) super.clone(); - } - public Builder setField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.setField(field, value); - } - public Builder clearField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - public Builder clearOneof( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - public Builder setRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - int index, Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - public Builder addRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.addRepeatedField(field, value); - } - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest) { - return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest other) { - if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance()) return this; - if (other.hasRegionA()) { - mergeRegionA(other.getRegionA()); - } - if (other.hasRegionB()) { - mergeRegionB(other.getRegionB()); - } - if (other.hasForcible()) { - setForcible(other.getForcible()); - } - if (other.hasMasterSystemTime()) { - setMasterSystemTime(other.getMasterSystemTime()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - public final boolean isInitialized() { - if (!hasRegionA()) { - return false; - } - if (!hasRegionB()) { - return false; - } - if (!getRegionA().isInitialized()) { - return false; - } - if (!getRegionB().isInitialized()) { - return false; - } - return true; - } - - public Builder mergeFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - 
mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_ = null; - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionABuilder_; - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public boolean hasRegionA() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() { - if (regionABuilder_ == null) { - return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; - } else { - return regionABuilder_.getMessage(); - } - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public Builder setRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionABuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - regionA_ = value; - onChanged(); - } else { - regionABuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public Builder setRegionA( - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionABuilder_ == null) { - regionA_ = builderForValue.build(); - onChanged(); - } else { - regionABuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public Builder mergeRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionABuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - regionA_ != null && - regionA_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { - regionA_ = - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionA_).mergeFrom(value).buildPartial(); - } else { - regionA_ = value; - } - onChanged(); - } else { - regionABuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public Builder clearRegionA() { - if (regionABuilder_ == null) { - regionA_ = null; - onChanged(); - } else { - regionABuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionABuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegionAFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() { - if (regionABuilder_ != null) { - return regionABuilder_.getMessageOrBuilder(); - } else { - return regionA_ == null ? 
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; - } - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> - getRegionAFieldBuilder() { - if (regionABuilder_ == null) { - regionABuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - getRegionA(), - getParentForChildren(), - isClean()); - regionA_ = null; - } - return regionABuilder_; - } - - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_ = null; - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBBuilder_; - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public boolean hasRegionB() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() { - if (regionBBuilder_ == null) { - return regionB_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; - } else { - return regionBBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public Builder setRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - regionB_ = value; - onChanged(); - } else { - regionBBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public Builder setRegionB( - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionBBuilder_ == null) { - regionB_ = builderForValue.build(); - onChanged(); - } else { - regionBBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public Builder mergeRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - regionB_ != null && - regionB_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { - regionB_ = - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionB_).mergeFrom(value).buildPartial(); - } else { - regionB_ = value; - } - onChanged(); - } else { - regionBBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public Builder clearRegionB() { - if (regionBBuilder_ == null) { - regionB_ = null; - onChanged(); - } else { - regionBBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getRegionBFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() { - if (regionBBuilder_ != null) { - return regionBBuilder_.getMessageOrBuilder(); - } else { - return regionB_ == null ? 
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; - } - } - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> - getRegionBFieldBuilder() { - if (regionBBuilder_ == null) { - regionBBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - getRegionB(), - getParentForChildren(), - isClean()); - regionB_ = null; - } - return regionBBuilder_; - } - - private boolean forcible_ ; - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean hasForcible() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean getForcible() { - return forcible_; - } - /** - * optional bool forcible = 3 [default = false]; - */ - public Builder setForcible(boolean value) { - bitField0_ |= 0x00000004; - forcible_ = value; - onChanged(); - return this; - } - /** - * optional bool forcible = 3 [default = false]; - */ - public Builder clearForcible() { - bitField0_ = (bitField0_ & ~0x00000004); - forcible_ = false; - onChanged(); - return this; - } - - private long masterSystemTime_ ; - /** - *
-       * <pre>
-       * wall clock time from master
-       * </pre>
-       *
-       * <code>optional uint64 master_system_time = 4;</code>
-       */
-      public boolean hasMasterSystemTime() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      /**
-       * <pre>
-       * wall clock time from master
-       * </pre>
-       *
-       * <code>optional uint64 master_system_time = 4;</code>
-       */
-      public long getMasterSystemTime() {
-        return masterSystemTime_;
-      }
-      /**
-       * <pre>
-       * wall clock time from master
-       * </pre>
-       *
-       * <code>optional uint64 master_system_time = 4;</code>
-       */
-      public Builder setMasterSystemTime(long value) {
-        bitField0_ |= 0x00000008;
-        masterSystemTime_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <pre>
-       * wall clock time from master
-       * </pre>
- * - * optional uint64 master_system_time = 4; - */ - public Builder clearMasterSystemTime() { - bitField0_ = (bitField0_ & ~0x00000008); - masterSystemTime_ = 0L; - onChanged(); - return this; - } - public final Builder setUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - public final Builder mergeUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:hbase.pb.MergeRegionsRequest) - } - - // @@protoc_insertion_point(class_scope:hbase.pb.MergeRegionsRequest) - private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest(); - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser - PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { - public MergeRegionsRequest parsePartialFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return new MergeRegionsRequest(input, extensionRegistry); - } - }; - - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface MergeRegionsResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.MergeRegionsResponse) - org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { - } - /** - * Protobuf type {@code hbase.pb.MergeRegionsResponse} - */ - public static final class MergeRegionsResponse extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.MergeRegionsResponse) - MergeRegionsResponseOrBuilder { - // Use MergeRegionsResponse.newBuilder() to construct. 
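One reading note for the service hunks at the end of this patch: protobuf's generated dispatch tables switch on each rpc's ordinal position in the AdminService definition, so deleting MergeRegions (ordinal 10) mechanically renumbers every later case, which is why the replicateWALEntry, replay, rollWALWriter and following cases all shift down by one below. A toy illustration of that dispatch pattern, with invented names, not HBase code:

    // Toy illustration of ordinal-based service dispatch (invented names).
    static String callMethod(int methodOrdinal) {
      switch (methodOrdinal) {
        case 9:  return "compactRegion";
        // MergeRegions used to be ordinal 10; once it is deleted from the
        // service, replicateWALEntry slides down from 11 to 10, and every
        // later method follows, producing the long renumbering diff below.
        case 10: return "replicateWALEntry";
        case 11: return "replay";
        default: throw new AssertionError("Can't get here.");
      }
    }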
- private MergeRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private MergeRegionsResponse() { - } - - @java.lang.Override - public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private MergeRegionsResponse( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - this(); - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.Builder.class); - } - - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - unknownFields.writeTo(output); - } - - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) obj; - - boolean result = true; - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - 
return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(byte[] data) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom( - byte[] data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseDelimitedFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.MergeRegionsResponse} - */ - public static final class Builder extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:hbase.pb.MergeRegionsResponse) - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponseOrBuilder { - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - public Builder clear() { - super.clear(); - return this; - } - - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_descriptor; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse build() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse buildPartial() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse(this); - onBuilt(); - return result; - } - - public Builder clone() { - return (Builder) super.clone(); - } - public Builder setField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.setField(field, value); - } - public Builder clearField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - public Builder clearOneof( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - public Builder setRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - int index, Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - public Builder addRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.addRepeatedField(field, value); - } - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) { - return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse other) { - if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - public final Builder setUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - public final Builder mergeUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:hbase.pb.MergeRegionsResponse) - } - - // @@protoc_insertion_point(class_scope:hbase.pb.MergeRegionsResponse) - private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse DEFAULT_INSTANCE; - static { - 
DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse(); - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser - PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { - public MergeRegionsResponse parsePartialFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return new MergeRegionsResponse(input, extensionRegistry); - } - }; - - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - public interface WALEntryOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.WALEntry) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -26239,14 +24839,6 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** - * rpc MergeRegions(.hbase.pb.MergeRegionsRequest) returns (.hbase.pb.MergeRegionsResponse); - */ - public abstract void mergeRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); - - /** * rpc ReplicateWALEntry(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse); */ public abstract void replicateWALEntry( @@ -26396,14 +24988,6 @@ public final class AdminProtos { } @java.lang.Override - public void mergeRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { - impl.mergeRegions(controller, request, done); - } - - @java.lang.Override public void replicateWALEntry( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, @@ -26510,22 +25094,20 @@ public final class AdminProtos { case 9: return impl.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request); case 10: - return impl.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request); - case 11: return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); - case 12: + case 11: return impl.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); - case 13: + case 12: return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request); - case 14: + case 13: return 
impl.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request); - case 15: + case 14: return impl.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request); - case 16: + case 15: return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); - case 17: + case 16: return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); - case 18: + case 17: return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -26562,22 +25144,20 @@ public final class AdminProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); - case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); - case 16: + case 15: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -26614,22 +25194,20 @@ public final class AdminProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); - case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); - case 16: + case 15: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -26720,14 +25298,6 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** - * rpc MergeRegions(.hbase.pb.MergeRegionsRequest) returns (.hbase.pb.MergeRegionsResponse); - */ - public abstract void mergeRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); - - /** * rpc ReplicateWALEntry(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse); */ public abstract void replicateWALEntry( @@ -26864,46 +25434,41 @@ public final class AdminProtos { done)); return; case 10: - this.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 11: this.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 12: + case 11: this.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 13: + case 12: this.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 14: + case 13: this.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 15: + case 14: this.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 16: + case 15: this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 17: + case 16: this.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 18: + case 17: this.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -26943,22 +25508,20 @@ public final class AdminProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: - 
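// Editor's note, a minimal sketch (not part of this patch): dropping the
// MergeRegions rpc shifts the ordinal of every AdminService method declared
// after it, which is why every case label in these dispatch switches and every
// getDescriptor().getMethods().get(N) call below is renumbered. Resolving a
// method by name instead of by ordinal makes the shift visible; the class
// name here is illustrative only.
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;

class MethodIndexProbe {
  public static void main(String[] args) {
    Descriptors.ServiceDescriptor admin =
        AdminProtos.AdminService.getDescriptor();
    Descriptors.MethodDescriptor replicate =
        admin.findMethodByName("ReplicateWALEntry");
    // Prints index 10 with this patch applied; it was 11 while MergeRegions
    // still occupied slot 10, so ordinal-based dispatch is version-sensitive
    // and both peers must carry the same service definition.
    System.out.println(replicate.getFullName() + " -> " + replicate.getIndex());
  }
}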
return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); - case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); - case 16: + case 15: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -26995,22 +25558,20 @@ public final class AdminProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); - case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); - case 16: + case 15: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -27183,27 +25744,12 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance())); } - public void mergeRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(10), - controller, - request, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(), - 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.class, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance())); - } - public void replicateWALEntry( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -27218,7 +25764,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -27233,7 +25779,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(), @@ -27248,7 +25794,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(), @@ -27263,7 +25809,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(), @@ -27278,7 +25824,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(), @@ -27293,7 +25839,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(), @@ -27308,7 +25854,7 @@ public final class 
AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(), @@ -27375,11 +25921,6 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse mergeRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse replicateWALEntry( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) @@ -27548,24 +26089,12 @@ public final class AdminProtos { } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse mergeRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), - controller, - request, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()); - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse replicateWALEntry( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -27577,7 +26106,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -27589,7 +26118,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance()); @@ -27601,7 +26130,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance()); @@ -27613,7 +26142,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance()); @@ -27625,7 +26154,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()); @@ -27637,7 +26166,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); @@ -27649,7 +26178,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()); @@ -27781,16 +26310,6 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_UpdateFavoredNodesResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_MergeRegionsRequest_descriptor; - private static final - 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_MergeRegionsResponse_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_WALEntry_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -27925,74 +26444,68 @@ public final class AdminProtos { "eInfo\032e\n\020RegionUpdateInfo\022$\n\006region\030\001 \002(" + "\0132\024.hbase.pb.RegionInfo\022+\n\rfavored_nodes" + "\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032UpdateFa" + - "voredNodesResponse\022\020\n\010response\030\001 \001(\r\"\244\001\n" + - "\023MergeRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031" + - ".hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 " + - "\002(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcib", - "le\030\003 \001(\010:\005false\022\032\n\022master_system_time\030\004 " + - "\001(\004\"\026\n\024MergeRegionsResponse\"a\n\010WALEntry\022" + - "\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n\017key_va" + - "lue_bytes\030\002 \003(\014\022\035\n\025associated_cell_count" + - "\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryRequest\022!\n\005e" + - "ntry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n\024replic" + - "ationClusterId\030\002 \001(\t\022\"\n\032sourceBaseNamesp" + - "aceDirPath\030\003 \001(\t\022!\n\031sourceHFileArchiveDi" + - "rPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryResponse" + - "\"\026\n\024RollWALWriterRequest\"0\n\025RollWALWrite", - "rResponse\022\027\n\017region_to_flush\030\001 \003(\014\"#\n\021St" + - "opServerRequest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopS" + - "erverResponse\"\026\n\024GetServerInfoRequest\"K\n" + - "\nServerInfo\022)\n\013server_name\030\001 \002(\0132\024.hbase" + - ".pb.ServerName\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025Ge" + - "tServerInfoResponse\022)\n\013server_info\030\001 \002(\013" + - "2\024.hbase.pb.ServerInfo\"\034\n\032UpdateConfigur" + - "ationRequest\"\035\n\033UpdateConfigurationRespo" + - "nse\"?\n\024GetRegionLoadRequest\022\'\n\ntable_nam" + - "e\030\001 \001(\0132\023.hbase.pb.TableName\"C\n\025GetRegio", - "nLoadResponse\022*\n\014region_loads\030\001 \003(\0132\024.hb" + - "ase.pb.RegionLoad2\322\014\n\014AdminService\022P\n\rGe" + - "tRegionInfo\022\036.hbase.pb.GetRegionInfoRequ" + - "est\032\037.hbase.pb.GetRegionInfoResponse\022M\n\014" + - "GetStoreFile\022\035.hbase.pb.GetStoreFileRequ" + - "est\032\036.hbase.pb.GetStoreFileResponse\022V\n\017G" + - "etOnlineRegion\022 .hbase.pb.GetOnlineRegio" + - "nRequest\032!.hbase.pb.GetOnlineRegionRespo" + - "nse\022G\n\nOpenRegion\022\033.hbase.pb.OpenRegionR" + - "equest\032\034.hbase.pb.OpenRegionResponse\022M\n\014", - "WarmupRegion\022\035.hbase.pb.WarmupRegionRequ" + - "est\032\036.hbase.pb.WarmupRegionResponse\022J\n\013C" + - "loseRegion\022\034.hbase.pb.CloseRegionRequest" + - "\032\035.hbase.pb.CloseRegionResponse\022w\n\032Close" + - "RegionForSplitOrMerge\022+.hbase.pb.CloseRe" + - "gionForSplitOrMergeRequest\032,.hbase.pb.Cl" + - 
"oseRegionForSplitOrMergeResponse\022J\n\013Flus" + - "hRegion\022\034.hbase.pb.FlushRegionRequest\032\035." + - "hbase.pb.FlushRegionResponse\022J\n\013SplitReg" + - "ion\022\034.hbase.pb.SplitRegionRequest\032\035.hbas", - "e.pb.SplitRegionResponse\022P\n\rCompactRegio" + - "n\022\036.hbase.pb.CompactRegionRequest\032\037.hbas" + - "e.pb.CompactRegionResponse\022M\n\014MergeRegio" + - "ns\022\035.hbase.pb.MergeRegionsRequest\032\036.hbas" + - "e.pb.MergeRegionsResponse\022\\\n\021ReplicateWA" + - "LEntry\022\".hbase.pb.ReplicateWALEntryReque" + - "st\032#.hbase.pb.ReplicateWALEntryResponse\022" + - "Q\n\006Replay\022\".hbase.pb.ReplicateWALEntryRe" + - "quest\032#.hbase.pb.ReplicateWALEntryRespon" + - "se\022P\n\rRollWALWriter\022\036.hbase.pb.RollWALWr", - "iterRequest\032\037.hbase.pb.RollWALWriterResp" + - "onse\022P\n\rGetServerInfo\022\036.hbase.pb.GetServ" + - "erInfoRequest\032\037.hbase.pb.GetServerInfoRe" + - "sponse\022G\n\nStopServer\022\033.hbase.pb.StopServ" + - "erRequest\032\034.hbase.pb.StopServerResponse\022" + - "_\n\022UpdateFavoredNodes\022#.hbase.pb.UpdateF" + - "avoredNodesRequest\032$.hbase.pb.UpdateFavo" + - "redNodesResponse\022b\n\023UpdateConfiguration\022" + - "$.hbase.pb.UpdateConfigurationRequest\032%." + - "hbase.pb.UpdateConfigurationResponse\022P\n\r", - "GetRegionLoad\022\036.hbase.pb.GetRegionLoadRe" + - "quest\032\037.hbase.pb.GetRegionLoadResponseBH" + - "\n1org.apache.hadoop.hbase.shaded.protobu" + - "f.generatedB\013AdminProtosH\001\210\001\001\240\001\001" + "voredNodesResponse\022\020\n\010response\030\001 \001(\r\"a\n\010" + + "WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022" + + "\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_c" + + "ell_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryReq", + "uest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022" + + "\034\n\024replicationClusterId\030\002 \001(\t\022\"\n\032sourceB" + + "aseNamespaceDirPath\030\003 \001(\t\022!\n\031sourceHFile" + + "ArchiveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntr" + + "yResponse\"\026\n\024RollWALWriterRequest\"0\n\025Rol" + + "lWALWriterResponse\022\027\n\017region_to_flush\030\001 " + + "\003(\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t" + + "\"\024\n\022StopServerResponse\"\026\n\024GetServerInfoR" + + "equest\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(" + + "\0132\024.hbase.pb.ServerName\022\022\n\nwebui_port\030\002 ", + "\001(\r\"B\n\025GetServerInfoResponse\022)\n\013server_i" + + "nfo\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032Updat" + + "eConfigurationRequest\"\035\n\033UpdateConfigura" + + "tionResponse\"?\n\024GetRegionLoadRequest\022\'\n\n" + + "table_name\030\001 \001(\0132\023.hbase.pb.TableName\"C\n" + + "\025GetRegionLoadResponse\022*\n\014region_loads\030\001" + + " \003(\0132\024.hbase.pb.RegionLoad2\203\014\n\014AdminServ" + + "ice\022P\n\rGetRegionInfo\022\036.hbase.pb.GetRegio" + + "nInfoRequest\032\037.hbase.pb.GetRegionInfoRes" + + "ponse\022M\n\014GetStoreFile\022\035.hbase.pb.GetStor", + "eFileRequest\032\036.hbase.pb.GetStoreFileResp" + + "onse\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOn" + + "lineRegionRequest\032!.hbase.pb.GetOnlineRe" + + "gionResponse\022G\n\nOpenRegion\022\033.hbase.pb.Op" + + "enRegionRequest\032\034.hbase.pb.OpenRegionRes" + + "ponse\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupR" + + "egionRequest\032\036.hbase.pb.WarmupRegionResp" + + "onse\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegi" + + 
"onRequest\032\035.hbase.pb.CloseRegionResponse" + + "\022w\n\032CloseRegionForSplitOrMerge\022+.hbase.p", + "b.CloseRegionForSplitOrMergeRequest\032,.hb" + + "ase.pb.CloseRegionForSplitOrMergeRespons" + + "e\022J\n\013FlushRegion\022\034.hbase.pb.FlushRegionR" + + "equest\032\035.hbase.pb.FlushRegionResponse\022J\n" + + "\013SplitRegion\022\034.hbase.pb.SplitRegionReque" + + "st\032\035.hbase.pb.SplitRegionResponse\022P\n\rCom" + + "pactRegion\022\036.hbase.pb.CompactRegionReque" + + "st\032\037.hbase.pb.CompactRegionResponse\022\\\n\021R" + + "eplicateWALEntry\022\".hbase.pb.ReplicateWAL" + + "EntryRequest\032#.hbase.pb.ReplicateWALEntr", + "yResponse\022Q\n\006Replay\022\".hbase.pb.Replicate" + + "WALEntryRequest\032#.hbase.pb.ReplicateWALE" + + "ntryResponse\022P\n\rRollWALWriter\022\036.hbase.pb" + + ".RollWALWriterRequest\032\037.hbase.pb.RollWAL" + + "WriterResponse\022P\n\rGetServerInfo\022\036.hbase." + + "pb.GetServerInfoRequest\032\037.hbase.pb.GetSe" + + "rverInfoResponse\022G\n\nStopServer\022\033.hbase.p" + + "b.StopServerRequest\032\034.hbase.pb.StopServe" + + "rResponse\022_\n\022UpdateFavoredNodes\022#.hbase." + + "pb.UpdateFavoredNodesRequest\032$.hbase.pb.", + "UpdateFavoredNodesResponse\022b\n\023UpdateConf" + + "iguration\022$.hbase.pb.UpdateConfiguration" + + "Request\032%.hbase.pb.UpdateConfigurationRe" + + "sponse\022P\n\rGetRegionLoad\022\036.hbase.pb.GetRe" + + "gionLoadRequest\032\037.hbase.pb.GetRegionLoad" + + "ResponseBH\n1org.apache.hadoop.hbase.shad" + + "ed.protobuf.generatedB\013AdminProtosH\001\210\001\001\240" + + "\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -28153,98 +26666,86 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateFavoredNodesResponse_descriptor, new java.lang.String[] { "Response", }); - internal_static_hbase_pb_MergeRegionsRequest_descriptor = - getDescriptor().getMessageTypes().get(22); - internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable = new - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_hbase_pb_MergeRegionsRequest_descriptor, - new java.lang.String[] { "RegionA", "RegionB", "Forcible", "MasterSystemTime", }); - internal_static_hbase_pb_MergeRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(23); - internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable = new - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_hbase_pb_MergeRegionsResponse_descriptor, - new java.lang.String[] { }); internal_static_hbase_pb_WALEntry_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_WALEntry_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_WALEntry_descriptor, new java.lang.String[] { "Key", "KeyValueBytes", "AssociatedCellCount", }); internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_ReplicateWALEntryRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor, new java.lang.String[] { "Entry", "ReplicationClusterId", "SourceBaseNamespaceDirPath", "SourceHFileArchiveDirPath", }); internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_ReplicateWALEntryResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RollWALWriterRequest_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(25); internal_static_hbase_pb_RollWALWriterRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RollWALWriterRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RollWALWriterResponse_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(26); internal_static_hbase_pb_RollWALWriterResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RollWALWriterResponse_descriptor, new java.lang.String[] { "RegionToFlush", }); internal_static_hbase_pb_StopServerRequest_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(27); internal_static_hbase_pb_StopServerRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopServerRequest_descriptor, 
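// Editor's note, a hedged sketch (assumes the HBase 2.x Admin API; this is
// not code from the patch): the message-type indices above shift for the same
// reason the rpc ordinals did. With the region-server MergeRegions rpc gone,
// clients ask the master to merge, and the master drives the merge as a
// procedure. The table name "t1" is a placeholder.
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

class MergeViaMaster {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf("t1"));
      // Merge the first two regions; forcible=false keeps the adjacency check.
      admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes(), false).get();
    }
  }
}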
new java.lang.String[] { "Reason", }); internal_static_hbase_pb_StopServerResponse_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(28); internal_static_hbase_pb_StopServerResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopServerResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetServerInfoRequest_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(29); internal_static_hbase_pb_GetServerInfoRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetServerInfoRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ServerInfo_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(30); internal_static_hbase_pb_ServerInfo_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ServerInfo_descriptor, new java.lang.String[] { "ServerName", "WebuiPort", }); internal_static_hbase_pb_GetServerInfoResponse_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(31); internal_static_hbase_pb_GetServerInfoResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetServerInfoResponse_descriptor, new java.lang.String[] { "ServerInfo", }); internal_static_hbase_pb_UpdateConfigurationRequest_descriptor = - getDescriptor().getMessageTypes().get(34); + getDescriptor().getMessageTypes().get(32); internal_static_hbase_pb_UpdateConfigurationRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_UpdateConfigurationResponse_descriptor = - getDescriptor().getMessageTypes().get(35); + getDescriptor().getMessageTypes().get(33); internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetRegionLoadRequest_descriptor = - getDescriptor().getMessageTypes().get(36); + getDescriptor().getMessageTypes().get(34); internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetRegionLoadRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetRegionLoadResponse_descriptor = - getDescriptor().getMessageTypes().get(37); + getDescriptor().getMessageTypes().get(35); internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetRegionLoadResponse_descriptor, diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java index 8f293f3..6a737b8 100644 --- 
hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java +++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java @@ -5507,35 +5507,33 @@ public final class MasterProtos { } - public interface DispatchMergingRegionsRequestOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.DispatchMergingRegionsRequest) + public interface MergeTableRegionsRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsRequest) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - boolean hasRegionA(); - /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA(); + java.util.List + getRegionList(); /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder(); - + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index); /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - boolean hasRegionB(); + int getRegionCount(); /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB(); + java.util.List + getRegionOrBuilderList(); /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder(); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index); /** * optional bool forcible = 3 [default = false]; @@ -5567,20 +5565,21 @@ public final class MasterProtos { /** *
    **
-   * Dispatch merging the specified regions.
+   * Merge the specified regions of a table.
* </pre>
* - * Protobuf type {@code hbase.pb.DispatchMergingRegionsRequest} + * Protobuf type {@code hbase.pb.MergeTableRegionsRequest} */ - public static final class DispatchMergingRegionsRequest extends + public static final class MergeTableRegionsRequest extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.DispatchMergingRegionsRequest) - DispatchMergingRegionsRequestOrBuilder { - // Use DispatchMergingRegionsRequest.newBuilder() to construct. - private DispatchMergingRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsRequest) + MergeTableRegionsRequestOrBuilder { + // Use MergeTableRegionsRequest.newBuilder() to construct. + private MergeTableRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private DispatchMergingRegionsRequest() { + private MergeTableRegionsRequest() { + region_ = java.util.Collections.emptyList(); forcible_ = false; nonceGroup_ = 0L; nonce_ = 0L; @@ -5591,7 +5590,7 @@ public final class MasterProtos { getUnknownFields() { return this.unknownFields; } - private DispatchMergingRegionsRequest( + private MergeTableRegionsRequest( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { @@ -5615,43 +5614,26 @@ public final class MasterProtos { break; } case 10: { - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = regionA_.toBuilder(); - } - regionA_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(regionA_); - regionA_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } - case 18: { - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = regionB_.toBuilder(); - } - regionB_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(regionB_); - regionB_ = subBuilder.buildPartial(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } - bitField0_ |= 0x00000002; + region_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry)); break; } case 24: { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000001; forcible_ = input.readBool(); break; } case 32: { - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000002; nonceGroup_ = input.readUInt64(); break; } case 40: { - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000004; nonce_ = input.readUInt64(); break; } @@ -5663,63 +5645,59 @@ public final class MasterProtos { throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = 
java.util.Collections.unmodifiableList(region_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class); } private int bitField0_; - public static final int REGION_A_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_; - /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public boolean hasRegionA() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } + public static final int REGION_FIELD_NUMBER = 1; + private java.util.List region_; /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() { - return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; + public java.util.List getRegionList() { + return region_; } /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() { - return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; + public java.util.List + getRegionOrBuilderList() { + return region_; } - - public static final int REGION_B_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_; /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public boolean hasRegionB() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public int getRegionCount() { + return region_.size(); } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() { - return regionB_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + return region_.get(index); } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() { - return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + return region_.get(index); } public static final int FORCIBLE_FIELD_NUMBER = 3; @@ -5728,7 +5706,7 @@ public final class MasterProtos { * optional bool forcible = 3 [default = false]; */ public boolean hasForcible() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bool forcible = 3 [default = false]; @@ -5743,7 +5721,7 @@ public final class MasterProtos { * optional uint64 nonce_group = 4 [default = 0]; */ public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint64 nonce_group = 4 [default = 0]; @@ -5758,7 +5736,7 @@ public final class MasterProtos { * optional uint64 nonce = 5 [default = 0]; */ public boolean hasNonce() { - return ((bitField0_ & 0x00000010) == 0x00000010); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 nonce = 5 [default = 0]; @@ -5773,21 +5751,11 @@ public final class MasterProtos { if (isInitialized == 1) return true; if (isInitialized == 0) return false; - if (!hasRegionA()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRegionB()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegionA().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegionB().isInitialized()) { - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -5795,19 +5763,16 @@ public final class MasterProtos { public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, getRegionA()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, getRegionB()); + for (int i = 0; i < region_.size(); i++) { + output.writeMessage(1, region_.get(i)); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(3, forcible_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(4, nonceGroup_); } - if (((bitField0_ & 0x00000010) == 0x00000010)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(5, nonce_); } unknownFields.writeTo(output); @@ -5818,23 +5783,19 @@ public final class MasterProtos { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getRegionA()); - } - if (((bitField0_ & 0x00000002) == 
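// Editor's note, a minimal sketch (not part of the generated code): with
// region_a/region_b collapsed into the repeated `region` field, the builder
// exposes addRegion(...) rather than setRegionA/setRegionB. The region names
// below are placeholders.
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

class BuildMergeTableRegionsRequest {
  static RegionSpecifier byName(String regionName) {
    return RegionSpecifier.newBuilder()
        .setType(RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFromUtf8(regionName))
        .build();
  }

  public static void main(String[] args) {
    MergeTableRegionsRequest request = MergeTableRegionsRequest.newBuilder()
        .addRegion(byName("regionA"))
        .addRegion(byName("regionB"))
        .setForcible(false)
        .build();
    // The old required region_a/region_b fields made isInitialized() reject a
    // request missing either region; a repeated field carries no such
    // guarantee, so the receiver must now validate the region count itself.
    System.out.println("regions in request: " + request.getRegionCount());
  }
}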
0x00000002)) { + for (int i = 0; i < region_.size(); i++) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getRegionB()); + .computeMessageSize(1, region_.get(i)); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeBoolSize(3, forcible_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeUInt64Size(4, nonceGroup_); } - if (((bitField0_ & 0x00000010) == 0x00000010)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeUInt64Size(5, nonce_); } @@ -5849,22 +5810,14 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest) obj; + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) obj; boolean result = true; - result = result && (hasRegionA() == other.hasRegionA()); - if (hasRegionA()) { - result = result && getRegionA() - .equals(other.getRegionA()); - } - result = result && (hasRegionB() == other.hasRegionB()); - if (hasRegionB()) { - result = result && getRegionB() - .equals(other.getRegionB()); - } + result = result && getRegionList() + .equals(other.getRegionList()); result = result && (hasForcible() == other.hasForcible()); if (hasForcible()) { result = result && (getForcible() @@ -5891,13 +5844,9 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegionA()) { - hash = (37 * hash) + REGION_A_FIELD_NUMBER; - hash = (53 * hash) + getRegionA().hashCode(); - } - if (hasRegionB()) { - hash = (37 * hash) + REGION_B_FIELD_NUMBER; - hash = (53 * hash) + getRegionB().hashCode(); + if (getRegionCount() > 0) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegionList().hashCode(); } if (hasForcible()) { hash = (37 * hash) + FORCIBLE_FIELD_NUMBER; @@ -5919,58 +5868,58 @@ public final class MasterProtos { return hash; } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(byte[] data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( byte[] data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static 
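// Editor's note, a minimal sketch: the parseFrom/parseDelimitedFrom overloads
// being renamed here change only in type name; wire behavior is unchanged, as
// a simple round trip shows.
import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

class MergeRequestRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    MergeTableRegionsRequest original =
        MergeTableRegionsRequest.newBuilder().setForcible(true).build();
    MergeTableRegionsRequest copy =
        MergeTableRegionsRequest.parseFrom(original.toByteArray());
    System.out.println("forcible survives round trip: " + copy.getForcible());
  }
}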
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -5982,7 +5931,7 @@ public final class MasterProtos { public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { @@ -5999,28 +5948,28 @@ public final class MasterProtos { /** *
      * <pre>
      **
-     * Dispatch merging the specified regions.
+     * Merging the specified regions in a table.
      * </pre>
* - * Protobuf type {@code hbase.pb.DispatchMergingRegionsRequest} + * Protobuf type {@code hbase.pb.MergeTableRegionsRequest} */ public static final class Builder extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:hbase.pb.DispatchMergingRegionsRequest) - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequestOrBuilder { + // @@protoc_insertion_point(builder_implements:hbase.pb.MergeTableRegionsRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequestOrBuilder { public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -6033,80 +5982,66 @@ public final class MasterProtos { private void maybeForceBuilderInitialization() { if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getRegionAFieldBuilder(); - getRegionBFieldBuilder(); + getRegionFieldBuilder(); } } public Builder clear() { super.clear(); - if (regionABuilder_ == null) { - regionA_ = null; - } else { - regionABuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (regionBBuilder_ == null) { - regionB_ = null; + if (regionBuilder_ == null) { + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); } else { - regionBBuilder_.clear(); + regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); forcible_ = false; - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000002); nonceGroup_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); + bitField0_ = (bitField0_ & ~0x00000004); nonce_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ & ~0x00000008); return this; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor; + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest build() { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest result = buildPartial(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest buildPartial() { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest(this); + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (regionABuilder_ == null) { - result.regionA_ = regionA_; + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = java.util.Collections.unmodifiableList(region_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.region_ = region_; } else { - result.regionA_ = regionABuilder_.build(); + result.region_ = regionBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (regionBBuilder_ == null) { - result.regionB_ = regionB_; - } else { - result.regionB_ = regionBBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; + to_bitField0_ |= 0x00000001; } result.forcible_ = forcible_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; } result.nonceGroup_ = nonceGroup_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; } result.nonce_ = nonce_; result.bitField0_ = to_bitField0_; @@ -6141,21 +6076,41 @@ public final class MasterProtos { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest) { - return 
mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest)other); + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest other) { - if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance()) return this; - if (other.hasRegionA()) { - mergeRegionA(other.getRegionA()); - } - if (other.hasRegionB()) { - mergeRegionB(other.getRegionB()); + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance()) return this; + if (regionBuilder_ == null) { + if (!other.region_.isEmpty()) { + if (region_.isEmpty()) { + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRegionIsMutable(); + region_.addAll(other.region_); + } + onChanged(); + } + } else { + if (!other.region_.isEmpty()) { + if (regionBuilder_.isEmpty()) { + regionBuilder_.dispose(); + regionBuilder_ = null; + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + regionBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getRegionFieldBuilder() : null; + } else { + regionBuilder_.addAllMessages(other.region_); + } + } } if (other.hasForcible()) { setForcible(other.getForcible()); @@ -6172,17 +6127,10 @@ public final class MasterProtos { } public final boolean isInitialized() { - if (!hasRegionA()) { - return false; - } - if (!hasRegionB()) { - return false; - } - if (!getRegionA().isInitialized()) { - return false; - } - if (!getRegionB().isInitialized()) { - return false; + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + return false; + } } return true; } @@ -6191,11 +6139,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parsedMessage = null; + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -6206,240 +6154,244 @@ public final class MasterProtos { } private int bitField0_; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_ = null; - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionABuilder_; + private java.util.List region_ = + java.util.Collections.emptyList(); + private void ensureRegionIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(region_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public boolean hasRegionA() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getRegionList() { + if (regionBuilder_ == null) { + return java.util.Collections.unmodifiableList(region_); + } else { + return regionBuilder_.getMessageList(); + } } /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() { - if (regionABuilder_ == null) { - return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; + public int getRegionCount() { + if (regionBuilder_ == null) { + return region_.size(); } else { - return regionABuilder_.getMessage(); + return regionBuilder_.getCount(); } } /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder setRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionABuilder_ == null) { + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + if (regionBuilder_ == null) { + return region_.get(index); + } else { + return regionBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - regionA_ = value; + ensureRegionIsMutable(); + region_.set(index, value); onChanged(); } else { - regionABuilder_.setMessage(value); + regionBuilder_.setMessage(index, value); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder setRegionA( - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionABuilder_ == null) { - regionA_ = builderForValue.build(); + public Builder setRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.set(index, builderForValue.build()); onChanged(); } else { - regionABuilder_.setMessage(builderForValue.build()); + regionBuilder_.setMessage(index, builderForValue.build()); } - bitField0_ |= 
0x00000001; return this; } /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder mergeRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionABuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - regionA_ != null && - regionA_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { - regionA_ = - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionA_).mergeFrom(value).buildPartial(); - } else { - regionA_ = value; + public Builder addRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureRegionIsMutable(); + region_.add(value); onChanged(); } else { - regionABuilder_.mergeFrom(value); + regionBuilder_.addMessage(value); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder clearRegionA() { - if (regionABuilder_ == null) { - regionA_ = null; + public Builder addRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.add(index, value); onChanged(); } else { - regionABuilder_.clear(); + regionBuilder_.addMessage(index, value); } - bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * required .hbase.pb.RegionSpecifier region_a = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionABuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegionAFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() { - if (regionABuilder_ != null) { - return regionABuilder_.getMessageOrBuilder(); + public Builder addRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.add(builderForValue.build()); + onChanged(); } else { - return regionA_ == null ? 
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_; + regionBuilder_.addMessage(builderForValue.build()); } + return this; } /** - * required .hbase.pb.RegionSpecifier region_a = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> - getRegionAFieldBuilder() { - if (regionABuilder_ == null) { - regionABuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - getRegionA(), - getParentForChildren(), - isClean()); - regionA_ = null; + public Builder addRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.add(index, builderForValue.build()); + onChanged(); + } else { + regionBuilder_.addMessage(index, builderForValue.build()); } - return regionABuilder_; - } - - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_ = null; - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBBuilder_; - /** - * required .hbase.pb.RegionSpecifier region_b = 2; - */ - public boolean hasRegionB() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return this; } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() { - if (regionBBuilder_ == null) { - return regionB_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; + public Builder addAllRegion( + java.lang.Iterable values) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, region_); + onChanged(); } else { - return regionBBuilder_.getMessage(); + regionBuilder_.addAllMessages(values); } + return this; } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder setRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - regionB_ = value; + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - regionBBuilder_.setMessage(value); + regionBuilder_.clear(); } - bitField0_ |= 0x00000002; return this; } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder setRegionB( - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionBBuilder_ == null) { - regionB_ = builderForValue.build(); + public Builder removeRegion(int index) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.remove(index); onChanged(); } else { - regionBBuilder_.setMessage(builderForValue.build()); + regionBuilder_.remove(index); } - bitField0_ |= 0x00000002; return this; } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder mergeRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - regionB_ != null && - regionB_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { - regionB_ = - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionB_).mergeFrom(value).buildPartial(); - } else { - regionB_ = value; - } - onChanged(); - } else { - regionBBuilder_.mergeFrom(value); + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder( + int index) { + return getRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + if (regionBuilder_ == null) { + return region_.get(index); } else { + return regionBuilder_.getMessageOrBuilder(index); } - bitField0_ |= 0x00000002; - return this; } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder clearRegionB() { - if (regionBBuilder_ == null) { - regionB_ = null; - onChanged(); + public java.util.List + getRegionOrBuilderList() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilderList(); } else { - regionBBuilder_.clear(); + return java.util.Collections.unmodifiableList(region_); } - bitField0_ = (bitField0_ & ~0x00000002); - return this; } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getRegionBFieldBuilder().getBuilder(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder() { + return getRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() { - if (regionBBuilder_ != null) { - return regionBBuilder_.getMessageOrBuilder(); - } else { - return regionB_ == null ? - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_; - } + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder( + int index) { + return getRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); } /** - * required .hbase.pb.RegionSpecifier region_b = 2; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + public java.util.List + getRegionBuilderList() { + return getRegionFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> - getRegionBFieldBuilder() { - if (regionBBuilder_ == null) { - regionBBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - getRegionB(), + region_, + ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - regionB_ = null; + region_ = null; } - return regionBBuilder_; + return regionBuilder_; } private boolean forcible_ ; @@ -6447,7 +6399,7 @@ public final class MasterProtos { * optional bool forcible = 3 [default = false]; */ public boolean hasForcible() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool forcible = 3 [default = false]; @@ -6459,7 +6411,7 @@ public final class MasterProtos { * optional bool forcible = 3 [default = false]; */ public Builder setForcible(boolean value) { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; forcible_ = value; onChanged(); return this; @@ -6468,7 +6420,7 @@ public final class MasterProtos { * optional bool forcible = 3 [default = false]; */ public Builder clearForcible() { - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000002); forcible_ = false; onChanged(); return this; @@ -6479,7 +6431,7 @@ public final class MasterProtos { * optional uint64 
nonce_group = 4 [default = 0]; */ public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint64 nonce_group = 4 [default = 0]; @@ -6491,7 +6443,7 @@ public final class MasterProtos { * optional uint64 nonce_group = 4 [default = 0]; */ public Builder setNonceGroup(long value) { - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000004; nonceGroup_ = value; onChanged(); return this; @@ -6500,7 +6452,7 @@ public final class MasterProtos { * optional uint64 nonce_group = 4 [default = 0]; */ public Builder clearNonceGroup() { - bitField0_ = (bitField0_ & ~0x00000008); + bitField0_ = (bitField0_ & ~0x00000004); nonceGroup_ = 0L; onChanged(); return this; @@ -6511,7 +6463,7 @@ public final class MasterProtos { * optional uint64 nonce = 5 [default = 0]; */ public boolean hasNonce() { - return ((bitField0_ & 0x00000010) == 0x00000010); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 nonce = 5 [default = 0]; @@ -6523,7 +6475,7 @@ public final class MasterProtos { * optional uint64 nonce = 5 [default = 0]; */ public Builder setNonce(long value) { - bitField0_ |= 0x00000010; + bitField0_ |= 0x00000008; nonce_ = value; onChanged(); return this; @@ -6532,7 +6484,7 @@ public final class MasterProtos { * optional uint64 nonce = 5 [default = 0]; */ public Builder clearNonce() { - bitField0_ = (bitField0_ & ~0x00000010); + bitField0_ = (bitField0_ & ~0x00000008); nonce_ = 0L; onChanged(); return this; @@ -6548,46 +6500,46 @@ public final class MasterProtos { } - // @@protoc_insertion_point(builder_scope:hbase.pb.DispatchMergingRegionsRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.MergeTableRegionsRequest) } - // @@protoc_insertion_point(class_scope:hbase.pb.DispatchMergingRegionsRequest) - private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:hbase.pb.MergeTableRegionsRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest(); + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest(); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest getDefaultInstance() { + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser - PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { - public DispatchMergingRegionsRequest parsePartialFrom( + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public MergeTableRegionsRequest parsePartialFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return new DispatchMergingRegionsRequest(input, extensionRegistry); + return new 
MergeTableRegionsRequest(input, extensionRegistry); } }; - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { return PARSER; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest getDefaultInstanceForType() { + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface DispatchMergingRegionsResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.DispatchMergingRegionsResponse) + public interface MergeTableRegionsResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsResponse) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { /** @@ -6600,17 +6552,17 @@ public final class MasterProtos { long getProcId(); } /** - * Protobuf type {@code hbase.pb.DispatchMergingRegionsResponse} + * Protobuf type {@code hbase.pb.MergeTableRegionsResponse} */ - public static final class DispatchMergingRegionsResponse extends + public static final class MergeTableRegionsResponse extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.DispatchMergingRegionsResponse) - DispatchMergingRegionsResponseOrBuilder { - // Use DispatchMergingRegionsResponse.newBuilder() to construct. - private DispatchMergingRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsResponse) + MergeTableRegionsResponseOrBuilder { + // Use MergeTableRegionsResponse.newBuilder() to construct. 
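// [Editorial illustration, not part of the patch] The caller-visible effect of the
// request change above: the removed region_a/region_b single-field setters give way
// to the repeated `region` field, populated through addRegion/addAllRegion on
// MergeTableRegionsRequest.Builder. A minimal sketch under that assumption; the
// class, method, and parameter names here are hypothetical placeholders:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

/** Hypothetical helper, for illustration only. */
final class MergeRequestSketch {
  static MergeTableRegionsRequest buildMergeRequest(
      byte[] regionNameA, byte[] regionNameB,
      boolean forcible, long nonceGroup, long nonce) {
    // Each region to merge is identified by a RegionSpecifier carrying
    // its full region name bytes.
    RegionSpecifier a = RegionSpecifier.newBuilder()
        .setType(RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFrom(regionNameA))
        .build();
    RegionSpecifier b = RegionSpecifier.newBuilder()
        .setType(RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFrom(regionNameB))
        .build();
    // The repeated `region` field reuses tag 1 (the old required region_a
    // slot) while tag 2 (region_b) is retired; forcible/nonce_group/nonce
    // keep tags 3-5, as the writeTo/getSerializedSize hunks above show.
    return MergeTableRegionsRequest.newBuilder()
        .addRegion(a)
        .addRegion(b)
        .setForcible(forcible)
        .setNonceGroup(nonceGroup)
        .setNonce(nonce)
        .build();
  }
}

// Keeping the old field numbers for forcible/nonce_group/nonce and leaving tag 2
// unused avoids wire-format collisions between the retired and the new message shape.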
+ private MergeTableRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private DispatchMergingRegionsResponse() { + private MergeTableRegionsResponse() { procId_ = 0L; } @@ -6619,7 +6571,7 @@ public final class MasterProtos { getUnknownFields() { return this.unknownFields; } - private DispatchMergingRegionsResponse( + private MergeTableRegionsResponse( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { @@ -6661,14 +6613,14 @@ public final class MasterProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.Builder.class); } private int bitField0_; @@ -6725,1508 +6677,10 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) obj; - - boolean result = true; - result = result && (hasProcId() == other.hasProcId()); - if (hasProcId()) { - result = result && (getProcId() - == other.getProcId()); - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasProcId()) { - hash = (37 * hash) + PROC_ID_FIELD_NUMBER; - hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( - getProcId()); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) - throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(byte[] data) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom( - byte[] data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseDelimitedFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - public Builder newBuilderForType() { return newBuilder(); } - 
public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.DispatchMergingRegionsResponse} - */ - public static final class Builder extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:hbase.pb.DispatchMergingRegionsResponse) - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponseOrBuilder { - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - public Builder clear() { - super.clear(); - procId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse build() { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse buildPartial() { - 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.procId_ = procId_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder clone() { - return (Builder) super.clone(); - } - public Builder setField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.setField(field, value); - } - public Builder clearField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - public Builder clearOneof( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - public Builder setRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - int index, Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - public Builder addRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.addRepeatedField(field, value); - } - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) { - return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse other) { - if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance()) return this; - if (other.hasProcId()) { - setProcId(other.getProcId()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private long procId_ ; - /** - * optional uint64 proc_id = 1; - */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional uint64 proc_id = 1; - */ - public long getProcId() { - return procId_; - } - /** - * optional uint64 proc_id = 1; - */ - public Builder setProcId(long value) { - bitField0_ |= 0x00000001; - procId_ = value; 
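// [Editorial illustration, not part of the patch] As with the
// DispatchMergingRegionsResponse being removed here, MergeTableRegionsResponse
// exposes only an optional proc_id: the merge executes as a master-side procedure,
// and the id is the caller's handle for tracking completion. A hedged sketch of the
// usual follow-up, assuming the existing getProcedureResult master RPC and a
// hypothetical `response` of type MergeTableRegionsResponse:
//
//   MasterProtos.GetProcedureResultRequest poll =
//       MasterProtos.GetProcedureResultRequest.newBuilder()
//           .setProcId(response.getProcId())
//           .build();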
- onChanged(); - return this; - } - /** - * optional uint64 proc_id = 1; - */ - public Builder clearProcId() { - bitField0_ = (bitField0_ & ~0x00000001); - procId_ = 0L; - onChanged(); - return this; - } - public final Builder setUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - public final Builder mergeUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:hbase.pb.DispatchMergingRegionsResponse) - } - - // @@protoc_insertion_point(class_scope:hbase.pb.DispatchMergingRegionsResponse) - private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse(); - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser - PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { - public DispatchMergingRegionsResponse parsePartialFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return new DispatchMergingRegionsResponse(input, extensionRegistry); - } - }; - - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface MergeTableRegionsRequestOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsRequest) - org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { - - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - java.util.List - getRegionList(); - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index); - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - int getRegionCount(); - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - java.util.List - getRegionOrBuilderList(); - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( - int index); - - /** - * optional bool forcible = 3 [default = false]; - */ - boolean hasForcible(); - /** - * optional bool forcible = 3 [default = false]; - */ - boolean getForcible(); - - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - boolean hasNonceGroup(); - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - long getNonceGroup(); - - /** - * optional uint64 nonce = 5 [default = 0]; - */ - boolean hasNonce(); - /** - * optional uint64 nonce = 5 [default = 0]; - */ - long 
getNonce(); - } - /** - *
-   * <pre>
-   **
-   * Merging the specified regions in a table.
-   * </pre>
- * - * Protobuf type {@code hbase.pb.MergeTableRegionsRequest} - */ - public static final class MergeTableRegionsRequest extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsRequest) - MergeTableRegionsRequestOrBuilder { - // Use MergeTableRegionsRequest.newBuilder() to construct. - private MergeTableRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private MergeTableRegionsRequest() { - region_ = java.util.Collections.emptyList(); - forcible_ = false; - nonceGroup_ = 0L; - nonce_ = 0L; - } - - @java.lang.Override - public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private MergeTableRegionsRequest( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - this(); - int mutable_bitField0_ = 0; - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - region_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - region_.add( - input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry)); - break; - } - case 24: { - bitField0_ |= 0x00000001; - forcible_ = input.readBool(); - break; - } - case 32: { - bitField0_ |= 0x00000002; - nonceGroup_ = input.readUInt64(); - break; - } - case 40: { - bitField0_ |= 0x00000004; - nonce_ = input.readUInt64(); - break; - } - } - } - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - region_ = java.util.Collections.unmodifiableList(region_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class); - } - - private int bitField0_; - public static final int 
REGION_FIELD_NUMBER = 1; - private java.util.List region_; - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public java.util.List getRegionList() { - return region_; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public java.util.List - getRegionOrBuilderList() { - return region_; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public int getRegionCount() { - return region_.size(); - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { - return region_.get(index); - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( - int index) { - return region_.get(index); - } - - public static final int FORCIBLE_FIELD_NUMBER = 3; - private boolean forcible_; - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean hasForcible() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean getForcible() { - return forcible_; - } - - public static final int NONCE_GROUP_FIELD_NUMBER = 4; - private long nonceGroup_; - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - public long getNonceGroup() { - return nonceGroup_; - } - - public static final int NONCE_FIELD_NUMBER = 5; - private long nonce_; - /** - * optional uint64 nonce = 5 [default = 0]; - */ - public boolean hasNonce() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional uint64 nonce = 5 [default = 0]; - */ - public long getNonce() { - return nonce_; - } - - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - for (int i = 0; i < getRegionCount(); i++) { - if (!getRegion(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - for (int i = 0; i < region_.size(); i++) { - output.writeMessage(1, region_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(3, forcible_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(4, nonceGroup_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(5, nonce_); - } - unknownFields.writeTo(output); - } - - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < region_.size(); i++) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(1, region_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeBoolSize(3, forcible_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, nonceGroup_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, nonce_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) obj; - - boolean result = true; - result = result && getRegionList() - .equals(other.getRegionList()); - result = result && (hasForcible() == other.hasForcible()); - if (hasForcible()) { - result = result && (getForcible() - == other.getForcible()); - } - result = result && (hasNonceGroup() == other.hasNonceGroup()); - if (hasNonceGroup()) { - result = result && (getNonceGroup() - == other.getNonceGroup()); - } - result = result && (hasNonce() == other.hasNonce()); - if (hasNonce()) { - result = result && (getNonce() - == other.getNonce()); - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getRegionCount() > 0) { - hash = (37 * hash) + REGION_FIELD_NUMBER; - hash = (53 * hash) + getRegionList().hashCode(); - } - if (hasForcible()) { - hash = (37 * hash) + FORCIBLE_FIELD_NUMBER; - hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( - getForcible()); - } - if (hasNonceGroup()) { - hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER; - hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( - getNonceGroup()); - } - if (hasNonce()) { - hash = (37 * hash) + NONCE_FIELD_NUMBER; - hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( - getNonce()); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(byte[] data) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( - byte[] data, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom( - java.io.InputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - *
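For orientation while reading the generated class above: a MergeTableRegionsRequest carries a repeated RegionSpecifier (field 1) naming the regions to merge, an optional forcible flag (field 3, default false), and a nonce_group/nonce pair (fields 4 and 5) that lets the master deduplicate retried submissions. A minimal sketch of assembling one through this builder API; the buildMergeRequest helper and the byte[] region names are hypothetical and only illustrate the generated surface shown in this diff:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

    // Hypothetical helper: build a request to merge two regions identified by region name.
    static MergeTableRegionsRequest buildMergeRequest(byte[] regionA, byte[] regionB,
        boolean forcible, long nonceGroup, long nonce) {
      MergeTableRegionsRequest.Builder builder = MergeTableRegionsRequest.newBuilder();
      for (byte[] name : new byte[][] { regionA, regionB }) {
        // addRegion(Builder) is one of the generated repeated-field mutators.
        builder.addRegion(RegionSpecifier.newBuilder()
            .setType(RegionSpecifierType.REGION_NAME)
            .setValue(ByteString.copyFrom(name)));
      }
      return builder.setForcible(forcible)   // also sets the presence bit for field 3
          .setNonceGroup(nonceGroup)
          .setNonce(nonce)
          .build();                          // throws if any RegionSpecifier is uninitialized
    }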
-     **
-     * Merging the specified regions in a table.
-     *
- * - * Protobuf type {@code hbase.pb.MergeTableRegionsRequest} - */ - public static final class Builder extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:hbase.pb.MergeTableRegionsRequest) - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequestOrBuilder { - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - getRegionFieldBuilder(); - } - } - public Builder clear() { - super.clear(); - if (regionBuilder_ == null) { - region_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - regionBuilder_.clear(); - } - forcible_ = false; - bitField0_ = (bitField0_ & ~0x00000002); - nonceGroup_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - nonce_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest build() { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest buildPartial() { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (regionBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - region_ = java.util.Collections.unmodifiableList(region_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.region_ = 
region_; - } else { - result.region_ = regionBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000001; - } - result.forcible_ = forcible_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.nonceGroup_ = nonceGroup_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.nonce_ = nonce_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder clone() { - return (Builder) super.clone(); - } - public Builder setField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.setField(field, value); - } - public Builder clearField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - public Builder clearOneof( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - public Builder setRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - int index, Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - public Builder addRepeatedField( - org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, - Object value) { - return (Builder) super.addRepeatedField(field, value); - } - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) { - return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other) { - if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance()) return this; - if (regionBuilder_ == null) { - if (!other.region_.isEmpty()) { - if (region_.isEmpty()) { - region_ = other.region_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureRegionIsMutable(); - region_.addAll(other.region_); - } - onChanged(); - } - } else { - if (!other.region_.isEmpty()) { - if (regionBuilder_.isEmpty()) { - regionBuilder_.dispose(); - regionBuilder_ = null; - region_ = other.region_; - bitField0_ = (bitField0_ & ~0x00000001); - regionBuilder_ = - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
- getRegionFieldBuilder() : null; - } else { - regionBuilder_.addAllMessages(other.region_); - } - } - } - if (other.hasForcible()) { - setForcible(other.getForcible()); - } - if (other.hasNonceGroup()) { - setNonceGroup(other.getNonceGroup()); - } - if (other.hasNonce()) { - setNonce(other.getNonce()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getRegionCount(); i++) { - if (!getRegion(i).isInitialized()) { - return false; - } - } - return true; - } - - public Builder mergeFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.util.List region_ = - java.util.Collections.emptyList(); - private void ensureRegionIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - region_ = new java.util.ArrayList(region_); - bitField0_ |= 0x00000001; - } - } - - private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; - - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public java.util.List getRegionList() { - if (regionBuilder_ == null) { - return java.util.Collections.unmodifiableList(region_); - } else { - return regionBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public int getRegionCount() { - if (regionBuilder_ == null) { - return region_.size(); - } else { - return regionBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { - if (regionBuilder_ == null) { - return region_.get(index); - } else { - return regionBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder setRegion( - int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRegionIsMutable(); - region_.set(index, value); - onChanged(); - } else { - regionBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder setRegion( - int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.set(index, builderForValue.build()); - onChanged(); - } else { - regionBuilder_.setMessage(index, 
builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder addRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRegionIsMutable(); - region_.add(value); - onChanged(); - } else { - regionBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder addRegion( - int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { - if (regionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRegionIsMutable(); - region_.add(index, value); - onChanged(); - } else { - regionBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder addRegion( - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.add(builderForValue.build()); - onChanged(); - } else { - regionBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder addRegion( - int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.add(index, builderForValue.build()); - onChanged(); - } else { - regionBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder addAllRegion( - java.lang.Iterable values) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, region_); - onChanged(); - } else { - regionBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder clearRegion() { - if (regionBuilder_ == null) { - region_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - regionBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public Builder removeRegion(int index) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.remove(index); - onChanged(); - } else { - regionBuilder_.remove(index); - } - return this; - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder( - int index) { - return getRegionFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( - int index) { - if (regionBuilder_ == null) { - return region_.get(index); } else { - return regionBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public java.util.List - getRegionOrBuilderList() { - if (regionBuilder_ != null) { - return regionBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(region_); - } - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; 
- */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder() { - return getRegionFieldBuilder().addBuilder( - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder( - int index) { - return getRegionFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); - } - /** - * repeated .hbase.pb.RegionSpecifier region = 1; - */ - public java.util.List - getRegionBuilderList() { - return getRegionFieldBuilder().getBuilderList(); - } - private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> - getRegionFieldBuilder() { - if (regionBuilder_ == null) { - regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - region_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - region_ = null; - } - return regionBuilder_; - } - - private boolean forcible_ ; - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean hasForcible() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool forcible = 3 [default = false]; - */ - public boolean getForcible() { - return forcible_; - } - /** - * optional bool forcible = 3 [default = false]; - */ - public Builder setForcible(boolean value) { - bitField0_ |= 0x00000002; - forcible_ = value; - onChanged(); - return this; - } - /** - * optional bool forcible = 3 [default = false]; - */ - public Builder clearForcible() { - bitField0_ = (bitField0_ & ~0x00000002); - forcible_ = false; - onChanged(); - return this; - } - - private long nonceGroup_ ; - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - public long getNonceGroup() { - return nonceGroup_; - } - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - public Builder setNonceGroup(long value) { - bitField0_ |= 0x00000004; - nonceGroup_ = value; - onChanged(); - return this; - } - /** - * optional uint64 nonce_group = 4 [default = 0]; - */ - public Builder clearNonceGroup() { - bitField0_ = (bitField0_ & ~0x00000004); - nonceGroup_ = 0L; - onChanged(); - return this; - } - - private long nonce_ ; - /** - * optional uint64 nonce = 5 [default = 0]; - */ - public boolean hasNonce() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional uint64 nonce = 5 [default = 0]; - */ - public long getNonce() { - return nonce_; - } - /** - * optional uint64 nonce = 5 [default = 0]; - */ - public Builder setNonce(long value) { - bitField0_ |= 0x00000008; - nonce_ = value; - onChanged(); - return this; - } - /** - * optional uint64 nonce = 5 
[default = 0]; - */ - public Builder clearNonce() { - bitField0_ = (bitField0_ & ~0x00000008); - nonce_ = 0L; - onChanged(); - return this; - } - public final Builder setUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - public final Builder mergeUnknownFields( - final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:hbase.pb.MergeTableRegionsRequest) - } - - // @@protoc_insertion_point(class_scope:hbase.pb.MergeTableRegionsRequest) - private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest(); - } - - public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser - PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { - public MergeTableRegionsRequest parsePartialFrom( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return new MergeTableRegionsRequest(input, extensionRegistry); - } - }; - - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface MergeTableRegionsResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsResponse) - org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { - - /** - * optional uint64 proc_id = 1; - */ - boolean hasProcId(); - /** - * optional uint64 proc_id = 1; - */ - long getProcId(); - } - /** - * Protobuf type {@code hbase.pb.MergeTableRegionsResponse} - */ - public static final class MergeTableRegionsResponse extends - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsResponse) - MergeTableRegionsResponseOrBuilder { - // Use MergeTableRegionsResponse.newBuilder() to construct. 
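Because presence in the request class is tracked in bitField0_ rather than inferred from values, an explicitly set default survives the writeTo()/parseFrom() round trip shown above. A small sketch, reusing the hypothetical buildMergeRequest helper from earlier (regionA and regionB remain placeholder names):

    MergeTableRegionsRequest req =
        buildMergeRequest(regionA, regionB, /* forcible = */ false, 0L, 42L);
    byte[] wire = req.toByteArray();                       // serializes via writeTo(...)
    MergeTableRegionsRequest parsed = MergeTableRegionsRequest.parseFrom(wire);
    assert parsed.hasForcible() && !parsed.getForcible();  // field 3 was written even though false
    assert parsed.getRegionCount() == 2 && parsed.getNonce() == 42L;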
- private MergeTableRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private MergeTableRegionsResponse() { - procId_ = 0L; - } - - @java.lang.Override - public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private MergeTableRegionsResponse( - org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, - org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - this(); - int mutable_bitField0_ = 0; - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = - org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - procId_ = input.readUInt64(); - break; - } - } - } - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_descriptor; - } - - protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.Builder.class); - } - - private int bitField0_; - public static final int PROC_ID_FIELD_NUMBER = 1; - private long procId_; - /** - * optional uint64 proc_id = 1; - */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional uint64 proc_id = 1; - */ - public long getProcId() { - return procId_; - } - - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, procId_); - } - unknownFields.writeTo(output); - } - - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, procId_); - } - size += 
unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse) obj; + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse) obj; boolean result = true; result = result && (hasProcId() == other.hasProcId()); @@ -69110,18 +67564,6 @@ public final class MasterProtos { /** *
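The MergeTableRegionsResponse above is deliberately thin: its single optional proc_id (field 1) names the master procedure that performs the merge, so clients submit the request and then poll for completion rather than blocking inside the RPC. A hedged sketch of that pattern against the blocking master interface; the mergeAndWait helper, the null controller, and the fixed sleep interval are assumptions for illustration, not part of this diff:

    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

    // Sketch: submit the merge, then poll the procedure result until it leaves RUNNING.
    static void mergeAndWait(MasterService.BlockingInterface master,
        MergeTableRegionsRequest req) throws Exception {
      long procId = master.mergeTableRegions(null, req).getProcId();
      MasterProtos.GetProcedureResultRequest poll =
          MasterProtos.GetProcedureResultRequest.newBuilder().setProcId(procId).build();
      while (master.getProcedureResult(null, poll).getState()
          == MasterProtos.GetProcedureResultResponse.State.RUNNING) {
        Thread.sleep(100);   // naive poll interval; real callers add retries and timeouts
      }
    }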
-       ** Master dispatch merging the regions
-       *
- * - * rpc DispatchMergingRegions(.hbase.pb.DispatchMergingRegionsRequest) returns (.hbase.pb.DispatchMergingRegionsResponse); - */ - public abstract void dispatchMergingRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); - - /** - *
        ** Master merge the regions
        *
* @@ -69923,14 +68365,6 @@ public final class MasterProtos { } @java.lang.Override - public void dispatchMergingRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { - impl.dispatchMergingRegions(controller, request, done); - } - - @java.lang.Override public void mergeTableRegions( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request, @@ -70435,122 +68869,120 @@ public final class MasterProtos { case 8: return impl.moveRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest)request); case 9: - return impl.dispatchMergingRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest)request); - case 10: return impl.mergeTableRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)request); - case 11: + case 10: return impl.assignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest)request); - case 12: + case 11: return impl.unassignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest)request); - case 13: + case 12: return impl.offlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest)request); - case 14: + case 13: return impl.deleteTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest)request); - case 15: + case 14: return impl.truncateTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest)request); - case 16: + case 15: return impl.enableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest)request); - case 17: + case 16: return impl.disableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest)request); - case 18: + case 17: return impl.modifyTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest)request); - case 19: + case 18: return impl.createTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest)request); - case 20: + case 19: return impl.shutdown(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest)request); - case 21: + case 20: return impl.stopMaster(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest)request); - case 22: + case 21: return impl.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request); - case 23: + case 22: return impl.balance(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest)request); - case 24: + case 23: return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request); - case 25: + case 24: return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request); - case 26: + case 25: return 
impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request); - case 27: + case 26: return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request); - case 28: + case 27: return impl.normalize(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest)request); - case 29: + case 28: return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); - case 30: + case 29: return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); - case 31: + case 30: return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); - case 32: + case 31: return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); - case 33: + case 32: return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); - case 34: + case 33: return impl.execMasterService(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); - case 35: + case 34: return impl.snapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest)request); - case 36: + case 35: return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); - case 37: + case 36: return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); - case 38: + case 37: return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); - case 39: + case 38: return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); - case 40: + case 39: return impl.execProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request); - case 41: + case 40: return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request); - case 42: + case 41: return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); - case 43: + case 42: return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); - case 44: + case 43: return impl.createNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); - case 45: + case 44: return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); - case 46: + case 45: return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); - case 47: + case 46: return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); 
- case 48: + case 47: return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); - case 49: + case 48: return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); - case 50: + case 49: return impl.getTableState(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest)request); - case 51: + case 50: return impl.setQuota(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest)request); - case 52: + case 51: return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); - case 53: + case 52: return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); - case 54: + case 53: return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); - case 55: + case 54: return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); - case 56: + case 55: return impl.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request); - case 57: + case 56: return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request); - case 58: + case 57: return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request); - case 59: + case 58: return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request); - case 60: + case 59: return impl.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request); - case 61: + case 60: return impl.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request); - case 62: + case 61: return impl.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request); - case 63: + case 62: return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request); - case 64: + case 63: return impl.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request); - case 65: + case 64: return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request); - case 66: + case 65: return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request); - case 67: + case 66: return impl.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ 
-70585,122 +69017,120 @@ public final class MasterProtos { case 8: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance(); - case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance(); - case 11: + case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); - case 12: + case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); - case 13: + case 12: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); - case 16: + case 15: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest.getDefaultInstance(); - case 19: + case 18: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest.getDefaultInstance(); - case 20: + case 19: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); - case 21: + case 20: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); - case 22: + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); - case 23: + case 22: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); - case 24: + case 23: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); - case 25: + case 24: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); - case 26: + case 25: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); - case 27: + case 26: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); - case 28: + case 27: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); - case 29: + case 28: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); - case 30: + case 29: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); - case 31: + case 30: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); - case 32: + case 31: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); - case 33: + case 32: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); - case 34: + case 33: return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); - case 35: + case 34: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); - case 36: + case 35: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); - case 37: + case 36: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); - case 38: + case 37: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); - case 39: + case 38: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + case 39: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); - case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); - case 43: + case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); - case 44: + case 43: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); - case 45: + case 44: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); - case 46: + case 45: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); - case 47: + case 46: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); - case 48: + case 47: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); - case 49: + case 48: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); - case 50: + case 49: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); - case 51: + case 50: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); - case 52: + case 51: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); - case 53: + case 52: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); - case 54: + case 53: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); - case 55: + case 54: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); - case 56: + case 55: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); - case 57: + case 56: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); - case 58: + case 57: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance(); - case 59: + case 58: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance(); - case 60: + case 59: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance(); - case 61: + case 60: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance(); - case 62: + case 61: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance(); - case 63: + case 62: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance(); - case 64: + case 63: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance(); - case 65: + case 64: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance(); - case 66: + case 65: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance(); - case 67: + case 66: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -70735,122 +69165,120 @@ public final class MasterProtos { case 8: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance(); - case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance(); - case 11: + case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); - case 12: + case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); - case 13: + case 12: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); - case 16: + case 15: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(); - case 19: + case 18: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(); - case 20: + case 19: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); - case 21: + case 20: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); - case 22: + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); - case 23: + case 22: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); - case 24: + case 23: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); - case 25: + case 24: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); - case 26: + case 25: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); - case 27: + case 26: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); - case 28: + case 27: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); - case 29: + case 28: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); - case 30: + case 29: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); - case 31: + case 30: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); - case 32: + case 31: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); - case 33: + case 32: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); - case 34: + case 33: return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); - case 35: + case 34: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); - case 36: + case 35: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); - case 37: + case 36: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); - case 38: + case 37: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); - case 39: + case 38: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + case 39: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); - case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); - case 43: + case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); - case 44: + case 43: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); - case 45: + case 44: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); - case 46: + case 45: 
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); - case 47: + case 46: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); - case 48: + case 47: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); - case 49: + case 48: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); - case 50: + case 49: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); - case 51: + case 50: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 51: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); - case 54: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); - case 55: + case 54: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); - case 56: + case 55: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); - case 57: + case 56: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); - case 58: + case 57: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(); - case 59: + case 58: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(); - case 60: + case 59: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance(); - case 61: + case 60: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance(); - case 62: + case 61: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance(); - case 63: + case 62: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance(); - case 64: + case 63: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(); - case 65: + case 64: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(); - case 66: + case 65: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(); - case 67: + case 66: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -70970,18 +69398,6 @@ public final class MasterProtos { /** *
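One consequence visible in the renumbered dispatch tables above: generated protobuf RPC dispatch is positional, so removing DispatchMergingRegions (method index 9) shifts every later method down by one, which is why each case label after 9 is rewritten rather than a single case being deleted. Code that needs to survive such reordering can resolve a method by name from the service descriptor instead of by ordinal; a small sketch, with the printed output only for illustration:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;

    public final class MethodLookup {
      public static void main(String[] args) {
        // Name-based lookup is stable across the index shifts in this diff.
        Descriptors.MethodDescriptor md =
            MasterService.getDescriptor().findMethodByName("MergeTableRegions");
        System.out.println(md.getFullName() + " -> index " + md.getIndex());
      }
    }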
-     ** Master dispatch merging the regions&#13;
-     * </pre>
-     *
-     * <code>rpc DispatchMergingRegions(.hbase.pb.DispatchMergingRegionsRequest) returns (.hbase.pb.DispatchMergingRegionsResponse);</code>
-     */
-    public abstract void dispatchMergingRegions(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse> done);
-
-    /**
-     * <pre>
      ** Master merge the regions&#13;
      * </pre>
      *
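[Reviewer aside, illustrative only and not part of the patch: with DispatchMergingRegions deleted, MergeTableRegions is the one master-side merge RPC left in this service. A minimal caller-side sketch of the replacement path follows, assuming the MergeTableRegionsRequest message carries a repeated RegionSpecifier "region" field and a boolean "forcible" flag; the builder method names follow the usual protoc pattern and are assumptions, not taken from this diff. The BlockingInterface signature it calls is the one declared later in this file.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

final class MergeCallSketch {
  // Merge two regions through the master's MergeTableRegions RPC, the path
  // that replaces the deleted dispatchMergingRegions stub.
  static void mergeViaMaster(MasterProtos.MasterService.BlockingInterface master,
      RpcController controller, byte[] regionA, byte[] regionB,
      boolean forcible) throws ServiceException {
    MasterProtos.MergeTableRegionsRequest.Builder builder =
        MasterProtos.MergeTableRegionsRequest.newBuilder();
    for (byte[] regionName : new byte[][] { regionA, regionB }) {
      // addRegion/setForcible are assumed builder names for the request's fields.
      builder.addRegion(RegionSpecifier.newBuilder()
          .setType(RegionSpecifierType.REGION_NAME)
          .setValue(ByteString.copyFrom(regionName)));
    }
    builder.setForcible(forcible);
    // Blocking stub call; after this patch the method sits at descriptor
    // index 9 (it was 10 while dispatchMergingRegions occupied slot 9).
    master.mergeTableRegions(controller, builder.build());
  }
}

The wholesale renumbering that fills the rest of this generated file exists only to keep getDescriptor().getMethods().get(N) and the callMethod/getRequestPrototype/getResponsePrototype switches aligned after slot 9 was vacated; behavior of the surviving methods is unchanged.]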
@@ -71773,296 +70189,291 @@
                done));
            return;
          case 9:
-            this.dispatchMergingRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest)request,
-              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
-                done));
-            return;
-          case 10:
            this.mergeTableRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 11:
+          case 10:
            this.assignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 12:
+          case 11:
            this.unassignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 13:
+          case 12:
            this.offlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 14:
+          case 13:
            this.deleteTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 15:
+          case 14:
            this.truncateTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 16:
+          case 15:
            this.enableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 17:
+          case 16:
            this.disableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 18:
+          case 17:
            this.modifyTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 19:
+          case 18:
            this.createTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 20:
+          case 19:
            this.shutdown(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 21:
+          case 20:
            this.stopMaster(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 22:
+          case 21:
            this.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request,
              org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback(
                done));
            return;
-          case 23:
+          case 22:
            this.balance(controller,
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 24: + case 23: this.setBalancerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 25: + case 24: this.isBalancerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 26: + case 25: this.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 27: + case 26: this.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 28: + case 27: this.normalize(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 29: + case 28: this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 30: + case 29: this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 31: + case 30: this.runCatalogScan(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 32: + case 31: this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 33: + case 32: this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 34: + case 33: this.execMasterService(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 35: + case 34: this.snapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 36: + case 35: this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 37: + case 36: this.deleteSnapshot(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 38: + case 37: this.isSnapshotDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 39: + case 38: this.restoreSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 40: + case 39: this.execProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 41: + case 40: this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 42: + case 41: this.isProcedureDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 43: + case 42: this.modifyNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 44: + case 43: this.createNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 45: + case 44: this.deleteNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 46: + case 45: this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 47: + case 46: this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 48: + case 47: this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 49: + case 48: this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 50: + case 49: this.getTableState(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 51: + case 50: this.setQuota(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 52: + case 51: this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 53: + case 52: this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 54: + case 53: this.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 55: + case 54: this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 56: + case 55: this.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 57: + case 56: this.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 58: + case 57: this.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 59: + case 58: this.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 60: + case 59: this.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 61: + case 60: this.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 62: + case 61: this.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 63: + case 62: this.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 64: + case 63: this.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 65: + case 64: this.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 66: + case 65: this.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 67: + case 66: this.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -72100,122 +70511,120 @@ public final class MasterProtos { case 8: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance(); - case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance(); - case 11: + case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); - case 12: + case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); - case 13: + case 12: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); - case 16: + case 15: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest.getDefaultInstance(); - case 19: + case 18: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest.getDefaultInstance(); - case 20: + case 19: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); - case 21: + case 20: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); - case 22: + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); - case 23: + case 22: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); - case 24: + case 23: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); - case 25: + case 24: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); - case 26: + case 25: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); - case 27: + case 26: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); - case 28: + case 27: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); - case 29: + case 28: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); - case 30: + case 29: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); - case 31: + case 30: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); - case 32: + case 31: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); - case 33: + case 32: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); - case 34: + case 33: return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); - case 35: + case 34: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); - case 36: + case 35: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); - case 37: + case 36: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); - case 38: + case 37: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); - case 39: + case 38: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + case 39: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); - case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); - case 43: + case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); - case 44: + case 43: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); - case 45: + case 44: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); - case 46: + case 45: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); - case 47: + case 46: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); - case 48: + case 47: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); - case 49: + case 48: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); - case 50: + case 49: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); - case 51: + case 50: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); - case 52: + case 
51: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); - case 53: + case 52: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); - case 54: + case 53: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); - case 55: + case 54: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); - case 56: + case 55: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); - case 57: + case 56: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); - case 58: + case 57: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance(); - case 59: + case 58: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance(); - case 60: + case 59: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance(); - case 61: + case 60: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance(); - case 62: + case 61: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance(); - case 63: + case 62: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance(); - case 64: + case 63: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance(); - case 65: + case 64: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance(); - case 66: + case 65: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance(); - case 67: + case 66: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -72250,122 +70659,120 @@ public final class MasterProtos { case 8: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance(); - case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance(); - case 11: + case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); - case 12: + case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); - case 13: + case 12: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(); - case 14: + case 13: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(); - case 15: + case 14: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); - case 16: + case 15: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(); - case 17: + case 16: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(); - case 18: + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(); - case 19: + case 18: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(); - case 20: + case 19: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); - case 21: + case 20: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); - case 22: + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); - case 23: + case 22: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); - case 24: + case 23: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); - case 25: + case 24: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); - case 26: + case 25: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); - case 27: + case 26: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); - case 28: + case 27: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); - case 29: + case 28: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); - case 30: + case 29: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); - case 31: + case 30: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); - case 32: + case 31: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); - case 33: + case 32: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); - case 34: + case 33: return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); - case 35: + case 34: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); - case 36: + case 35: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); - case 37: + case 36: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); - case 38: + case 37: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); - case 39: + case 38: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + case 39: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); - case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); - case 43: + case 42: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); - case 44: + case 43: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); - case 45: + case 44: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); - case 46: + case 45: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); - case 47: + case 46: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); - case 48: + case 47: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); - case 49: + case 48: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); - case 50: + case 49: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); - case 51: + case 50: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 51: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); - case 54: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); - case 55: + case 54: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); - case 56: + case 55: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); - case 57: + case 56: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); - case 58: + case 57: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(); - case 59: + case 58: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(); - case 60: + case 59: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance(); - case 61: + case 60: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance(); - case 62: + case 61: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance(); - case 63: + case 62: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance(); - case 64: + case 63: return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(); - case 65: + case 64: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(); - case 66: + case 65: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(); - case 67: + case 66: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -72523,27 +70930,12 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse.getDefaultInstance())); } - public void dispatchMergingRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(9), - controller, - request, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance(), - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.class, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance())); - } - public void mergeTableRegions( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance(), @@ -72558,7 +70950,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(), @@ -72573,7 +70965,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(), @@ -72588,7 +70980,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(), @@ -72603,7 +70995,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { 
channel.callMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(), @@ -72618,7 +71010,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(), @@ -72633,7 +71025,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(), @@ -72648,7 +71040,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(), @@ -72663,7 +71055,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(), @@ -72678,7 +71070,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(), @@ -72693,7 +71085,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(20), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(), @@ -72708,7 +71100,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(21), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(), @@ -72723,7 +71115,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(22), + getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(), @@ -72738,7 +71130,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(), @@ -72753,7 +71145,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(), @@ -72768,7 +71160,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(), @@ -72783,7 +71175,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(), @@ -72798,7 +71190,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(), @@ -72813,7 +71205,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(), @@ -72828,7 +71220,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(), @@ -72843,7 +71235,7 @@ public final class 
MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(), @@ -72858,7 +71250,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(), @@ -72873,7 +71265,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(), @@ -72888,7 +71280,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), @@ -72903,7 +71295,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(), @@ -72918,7 +71310,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(), @@ -72933,7 +71325,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(), @@ -72948,7 +71340,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(36), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(), @@ -72963,7 +71355,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(), @@ -72978,7 +71370,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(), @@ -72993,7 +71385,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -73008,7 +71400,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -73023,7 +71415,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(), @@ -73038,7 +71430,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(), @@ -73053,7 +71445,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(), @@ -73068,7 +71460,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - 
getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(), @@ -73083,7 +71475,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(), @@ -73098,7 +71490,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(), @@ -73113,7 +71505,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(), @@ -73128,7 +71520,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(), @@ -73143,7 +71535,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), @@ -73158,7 +71550,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(), @@ -73173,7 +71565,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -73188,7 +71580,7 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -73203,7 +71595,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), @@ -73218,7 +71610,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(), @@ -73233,7 +71625,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(56), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(), @@ -73248,7 +71640,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(57), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(), @@ -73263,7 +71655,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(58), + getDescriptor().getMethods().get(57), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(), @@ -73278,7 +71670,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(59), + getDescriptor().getMethods().get(58), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(), @@ -73293,7 +71685,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(60), + getDescriptor().getMethods().get(59), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance(), @@ -73308,7 +71700,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(61), + getDescriptor().getMethods().get(60), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance(), @@ -73323,7 +71715,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(62), + getDescriptor().getMethods().get(61), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance(), @@ -73338,7 +71730,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(63), + getDescriptor().getMethods().get(62), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance(), @@ -73353,7 +71745,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(64), + getDescriptor().getMethods().get(63), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(), @@ -73368,7 +71760,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(65), + getDescriptor().getMethods().get(64), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(), @@ -73383,7 +71775,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(66), + getDescriptor().getMethods().get(65), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(), @@ -73398,7 +71790,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(67), + getDescriptor().getMethods().get(66), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(), @@ -73460,11 +71852,6 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse mergeTableRegions( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request) @@ -73871,24 +72258,12 @@ public final class MasterProtos { } - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request) - throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), - controller, - request, - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance()); - } - - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse mergeTableRegions( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance()); @@ -73900,7 +72275,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance()); @@ -73912,7 +72287,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance()); @@ -73924,7 +72299,7 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance()); @@ -73936,7 +72311,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance()); @@ -73948,7 +72323,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance()); @@ -73960,7 +72335,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance()); @@ -73972,7 +72347,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance()); @@ -73984,7 +72359,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance()); @@ -73996,7 +72371,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance()); @@ -74008,7 +72383,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(20), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance()); @@ -74020,7 +72395,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(21), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()); @@ -74032,7 +72407,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(22), + getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()); @@ -74044,7 +72419,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()); @@ -74056,7 +72431,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance()); @@ -74068,7 +72443,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(24), 
controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance()); @@ -74080,7 +72455,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance()); @@ -74092,7 +72467,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance()); @@ -74104,7 +72479,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance()); @@ -74116,7 +72491,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance()); @@ -74128,7 +72503,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance()); @@ -74140,7 +72515,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(30), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); @@ -74152,7 +72527,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); @@ -74164,7 +72539,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); @@ -74176,7 +72551,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); @@ -74188,7 +72563,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); @@ -74200,7 +72575,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); @@ -74212,7 +72587,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); @@ -74224,7 
+72599,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); @@ -74236,7 +72611,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); @@ -74248,7 +72623,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -74260,7 +72635,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -74272,7 +72647,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); @@ -74284,7 +72659,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); @@ -74296,7 +72671,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); @@ -74308,7 +72683,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); @@ -74320,7 +72695,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); @@ -74332,7 +72707,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); @@ -74344,7 +72719,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); @@ -74356,7 +72731,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); @@ -74368,7 +72743,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest request) throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); @@ -74380,7 +72755,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); @@ -74392,7 +72767,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -74404,7 +72779,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -74416,7 +72791,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); @@ -74428,7 +72803,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); @@ -74440,7 +72815,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(56), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); @@ -74452,7 +72827,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(57), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); @@ -74464,7 +72839,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(58), + getDescriptor().getMethods().get(57), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance()); @@ -74476,7 +72851,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(59), + getDescriptor().getMethods().get(58), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance()); @@ -74488,7 +72863,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(60), + getDescriptor().getMethods().get(59), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance()); @@ -74500,7 +72875,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(61), + getDescriptor().getMethods().get(60), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance()); @@ -74512,7 +72887,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(62), + getDescriptor().getMethods().get(61), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance()); @@ -74524,7 +72899,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(63), + getDescriptor().getMethods().get(62), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance()); @@ -74536,7 +72911,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(64), + getDescriptor().getMethods().get(63), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance()); @@ -74548,7 +72923,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(65), + getDescriptor().getMethods().get(64), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance()); @@ -74560,7 +72935,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(66), + getDescriptor().getMethods().get(65), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance()); @@ -74572,7 +72947,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(67), + getDescriptor().getMethods().get(66), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance()); @@ -74624,16 +72999,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_MoveRegionResponse_fieldAccessorTable; 
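The renumbering above is mechanical: protobuf assigns each MethodDescriptor an index equal to its declaration order in the MasterService definition, so dropping DispatchMergingRegions (previously index 9, as the removed blocking-stub method shows) shifts every later method down by one, which is why every `getDescriptor().getMethods().get(N)` call in the regenerated stubs decrements. Hand-written callers that must tolerate this kind of service edit can resolve a descriptor by name instead of by position. A minimal sketch, assuming the shaded protobuf runtime is on the classpath; the MethodLookupSketch class is illustrative and not part of this patch:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

    final class MethodLookupSketch {
      // Resolves an RPC's MethodDescriptor by name rather than by its
      // positional index, so the lookup survives service-definition edits
      // like the DispatchMergingRegions removal in this patch.
      // Returns null when the named RPC no longer exists in the service.
      static Descriptors.MethodDescriptor find(String rpcName) {
        Descriptors.ServiceDescriptor service =
            MasterProtos.MasterService.getDescriptor();
        return service.findMethodByName(rpcName);
      }
    }

For example, find("MergeTableRegions") now resolves to the descriptor at index 9, the slot DispatchMergingRegions occupied before this change, matching the get(10) to get(9) shift in the blocking stub above.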
private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_DispatchMergingRegionsRequest_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_DispatchMergingRegionsResponse_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -75208,359 +73573,350 @@ public final class MasterProtos { "gionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" + "egionSpecifier\022.\n\020dest_server_name\030\002 \001(\013", "2\024.hbase.pb.ServerName\"\024\n\022MoveRegionResp" + - "onse\"\274\001\n\035DispatchMergingRegionsRequest\022+" + - "\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpecif" + - "ier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionS" + - "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\026\n\013non" + - "ce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\"1\n\036D" + - "ispatchMergingRegionsResponse\022\017\n\007proc_id" + - "\030\001 \001(\004\"\210\001\n\030MergeTableRegionsRequest\022)\n\006r" + - "egion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\027" + - "\n\010forcible\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004", - " \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\",\n\031MergeTableR" + - "egionsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023Assig" + - "nRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.p" + - "b.RegionSpecifier\"\026\n\024AssignRegionRespons" + - "e\"X\n\025UnassignRegionRequest\022)\n\006region\030\001 \002" + - "(\0132\031.hbase.pb.RegionSpecifier\022\024\n\005force\030\002" + - " \001(\010:\005false\"\030\n\026UnassignRegionResponse\"A\n" + - "\024OfflineRegionRequest\022)\n\006region\030\001 \002(\0132\031." 
+ - "hbase.pb.RegionSpecifier\"\027\n\025OfflineRegio" + - "nResponse\"\177\n\022CreateTableRequest\022+\n\014table", - "_schema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\n" + - "split_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010" + - "\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023CreateTableRespons" + - "e\022\017\n\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest" + - "\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNam" + - "e\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004" + - ":\0010\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 " + - "\001(\004\"\207\001\n\024TruncateTableRequest\022&\n\ttableNam" + - "e\030\001 \002(\0132\023.hbase.pb.TableName\022\035\n\016preserve" + - "Splits\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004", - ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableRe" + - "sponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableRe" + - "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" + - "leName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" + - "\003 \001(\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_" + - "id\030\001 \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable" + - "_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonc" + - "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Di" + - "sableTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022" + - "ModifyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023", - ".hbase.pb.TableName\022+\n\014table_schema\030\002 \002(" + - "\0132\025.hbase.pb.TableSchema\022\026\n\013nonce_group\030" + - "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTabl" + - "eResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateName" + - "spaceRequest\022:\n\023namespaceDescriptor\030\001 \002(" + - "\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013nonc" + - "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Cr" + - "eateNamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y" + - "\n\026DeleteNamespaceRequest\022\025\n\rnamespaceNam" + - "e\030\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce", - "\030\003 \001(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007" + - "proc_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022" + - ":\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase.pb." 
+ - "NamespaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004" + - ":\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespace" + - "Response\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespac" + - "eDescriptorRequest\022\025\n\rnamespaceName\030\001 \002(" + - "\t\"\\\n\036GetNamespaceDescriptorResponse\022:\n\023n" + - "amespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Name" + - "spaceDescriptor\"!\n\037ListNamespaceDescript", - "orsRequest\"^\n ListNamespaceDescriptorsRe" + - "sponse\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hb" + - "ase.pb.NamespaceDescriptor\"?\n&ListTableD" + - "escriptorsByNamespaceRequest\022\025\n\rnamespac" + - "eName\030\001 \002(\t\"U\n\'ListTableDescriptorsByNam" + - "espaceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hb" + - "ase.pb.TableSchema\"9\n ListTableNamesByNa" + - "mespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n" + - "!ListTableNamesByNamespaceResponse\022&\n\tta" + - "bleName\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Sh", - "utdownRequest\"\022\n\020ShutdownResponse\"\023\n\021Sto" + - "pMasterRequest\"\024\n\022StopMasterResponse\"\034\n\032" + - "IsInMaintenanceModeRequest\"8\n\033IsInMainte" + - "nanceModeResponse\022\031\n\021inMaintenanceMode\030\001" + - " \002(\010\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n" + - "\017BalanceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<" + - "\n\031SetBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022" + - "\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRunni" + - "ngResponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\032" + - "\n\030IsBalancerEnabledRequest\",\n\031IsBalancer", - "EnabledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetS" + - "plitOrMergeEnabledRequest\022\017\n\007enabled\030\001 \002" + - "(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030" + - "\003 \003(\0162\032.hbase.pb.MasterSwitchType\"4\n\036Set" + - "SplitOrMergeEnabledResponse\022\022\n\nprev_valu" + - "e\030\001 \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022" + - "/\n\013switch_type\030\001 \002(\0162\032.hbase.pb.MasterSw" + - "itchType\"0\n\035IsSplitOrMergeEnabledRespons" + - "e\022\017\n\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+" + - "\n\021NormalizeResponse\022\026\n\016normalizer_ran\030\001 ", - "\002(\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on" + - "\030\001 \002(\010\"=\n\034SetNormalizerRunningResponse\022\035" + - "\n\025prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNorma" + - "lizerEnabledRequest\".\n\033IsNormalizerEnabl" + - "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" + - "gScanRequest\"-\n\026RunCatalogScanResponse\022\023" + - "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" + - "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" + - "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " + - "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa", - "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" + - "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" + - "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" + - "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" + - "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" + - "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013" + - "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" + - "eSnapshotRequest\022/\n\010snapshot\030\001 
\002(\0132\035.hba" + - "se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" + - "hotResponse\"s\n\026RestoreSnapshotRequest\022/\n", - "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" + - "iption\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" + - "\003 \001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007p" + - "roc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n" + - "\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescr" + - "iption\"^\n\026IsSnapshotDoneResponse\022\023\n\004done" + - "\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase." + - "pb.SnapshotDescription\"O\n\034IsRestoreSnaps" + - "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" + - ".pb.SnapshotDescription\"4\n\035IsRestoreSnap", - "shotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n" + - "\033GetSchemaAlterStatusRequest\022\'\n\ntable_na" + - "me\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSche" + - "maAlterStatusResponse\022\035\n\025yet_to_update_r" + - "egions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032" + - "GetTableDescriptorsRequest\022(\n\013table_name" + - "s\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 " + - "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" + - "\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescriptors" + - "Response\022+\n\014table_schema\030\001 \003(\0132\025.hbase.p", - "b.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" + - "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" + - ":\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNa" + - "mesResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase" + - ".pb.TableName\"?\n\024GetTableStateRequest\022\'\n" + - "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"B" + - "\n\025GetTableStateResponse\022)\n\013table_state\030\001" + - " \002(\0132\024.hbase.pb.TableState\"\031\n\027GetCluster" + - "StatusRequest\"K\n\030GetClusterStatusRespons" + - "e\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clu", - "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" + - "IsMasterRunningResponse\022\031\n\021is_master_run" + - "ning\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tpr" + - "ocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDescri" + - "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" + - "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n" + - "\026IsProcedureDoneRequest\0221\n\tprocedure\030\001 \001" + - "(\0132\036.hbase.pb.ProcedureDescription\"`\n\027Is" + - "ProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" + - "se\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Procedur", - "eDescription\",\n\031GetProcedureResultReques" + - "t\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResul" + - "tResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetP" + - "rocedureResultResponse.State\022\022\n\nstart_ti" + - "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" + - "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore" + - "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" + - "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" + - "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI" + - "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr", - 
"ocedureResponse\022\034\n\024is_procedure_aborted\030" + - "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" + - "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" + - "ase.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\t" + - "user_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tn" + - "amespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hba" + - "se.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" + - "ypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031." + - "hbase.pb.ThrottleRequest\"\022\n\020SetQuotaResp" + - "onse\"J\n\037MajorCompactionTimestampRequest\022", - "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName" + - "\"U\n(MajorCompactionTimestampForRegionReq" + - "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" + - "ecifier\"@\n MajorCompactionTimestampRespo" + - "nse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Sec" + - "urityCapabilitiesRequest\"\354\001\n\034SecurityCap" + - "abilitiesResponse\022G\n\014capabilities\030\001 \003(\0162" + - "1.hbase.pb.SecurityCapabilitiesResponse." + - "Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTH" + - "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022", - "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" + - "\020\003\022\023\n\017CELL_VISIBILITY\020\004\"\"\n ListDrainingR" + - "egionServersRequest\"N\n!ListDrainingRegio" + - "nServersResponse\022)\n\013server_name\030\001 \003(\0132\024." + - "hbase.pb.ServerName\"F\n\031DrainRegionServer" + - "sRequest\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb" + - ".ServerName\"\034\n\032DrainRegionServersRespons" + - "e\"P\n#RemoveDrainFromRegionServersRequest" + - "\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerN" + - "ame\"&\n$RemoveDrainFromRegionServersRespo", - "nse*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005ME" + - "RGE\020\0012\3771\n\rMasterService\022e\n\024GetSchemaAlte" + - "rStatus\022%.hbase.pb.GetSchemaAlterStatusR" + - "equest\032&.hbase.pb.GetSchemaAlterStatusRe" + - "sponse\022b\n\023GetTableDescriptors\022$.hbase.pb" + - ".GetTableDescriptorsRequest\032%.hbase.pb.G" + - "etTableDescriptorsResponse\022P\n\rGetTableNa" + - "mes\022\036.hbase.pb.GetTableNamesRequest\032\037.hb" + - "ase.pb.GetTableNamesResponse\022Y\n\020GetClust" + - "erStatus\022!.hbase.pb.GetClusterStatusRequ", - "est\032\".hbase.pb.GetClusterStatusResponse\022" + - "V\n\017IsMasterRunning\022 .hbase.pb.IsMasterRu" + - "nningRequest\032!.hbase.pb.IsMasterRunningR" + - "esponse\022D\n\tAddColumn\022\032.hbase.pb.AddColum" + - "nRequest\032\033.hbase.pb.AddColumnResponse\022M\n" + - "\014DeleteColumn\022\035.hbase.pb.DeleteColumnReq" + - "uest\032\036.hbase.pb.DeleteColumnResponse\022M\n\014" + - "ModifyColumn\022\035.hbase.pb.ModifyColumnRequ" + - "est\032\036.hbase.pb.ModifyColumnResponse\022G\n\nM" + - "oveRegion\022\033.hbase.pb.MoveRegionRequest\032\034", - ".hbase.pb.MoveRegionResponse\022k\n\026Dispatch" + - "MergingRegions\022\'.hbase.pb.DispatchMergin" + - "gRegionsRequest\032(.hbase.pb.DispatchMergi" + - "ngRegionsResponse\022\\\n\021MergeTableRegions\022\"" + - ".hbase.pb.MergeTableRegionsRequest\032#.hba" + - "se.pb.MergeTableRegionsResponse\022M\n\014Assig" + - "nRegion\022\035.hbase.pb.AssignRegionRequest\032\036" + - ".hbase.pb.AssignRegionResponse\022S\n\016Unassi" + - "gnRegion\022\037.hbase.pb.UnassignRegionReques" + - "t\032 
.hbase.pb.UnassignRegionResponse\022P\n\rO", - "fflineRegion\022\036.hbase.pb.OfflineRegionReq" + - "uest\032\037.hbase.pb.OfflineRegionResponse\022J\n" + - "\013DeleteTable\022\034.hbase.pb.DeleteTableReque" + - "st\032\035.hbase.pb.DeleteTableResponse\022P\n\rtru" + - "ncateTable\022\036.hbase.pb.TruncateTableReque" + - "st\032\037.hbase.pb.TruncateTableResponse\022J\n\013E" + - "nableTable\022\034.hbase.pb.EnableTableRequest" + - "\032\035.hbase.pb.EnableTableResponse\022M\n\014Disab" + - "leTable\022\035.hbase.pb.DisableTableRequest\032\036" + - ".hbase.pb.DisableTableResponse\022J\n\013Modify", - "Table\022\034.hbase.pb.ModifyTableRequest\032\035.hb" + - "ase.pb.ModifyTableResponse\022J\n\013CreateTabl" + - "e\022\034.hbase.pb.CreateTableRequest\032\035.hbase." + - "pb.CreateTableResponse\022A\n\010Shutdown\022\031.hba" + - "se.pb.ShutdownRequest\032\032.hbase.pb.Shutdow" + - "nResponse\022G\n\nStopMaster\022\033.hbase.pb.StopM" + - "asterRequest\032\034.hbase.pb.StopMasterRespon" + - "se\022h\n\031IsMasterInMaintenanceMode\022$.hbase." + - "pb.IsInMaintenanceModeRequest\032%.hbase.pb" + - ".IsInMaintenanceModeResponse\022>\n\007Balance\022", - "\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Bal" + - "anceResponse\022_\n\022SetBalancerRunning\022#.hba" + - "se.pb.SetBalancerRunningRequest\032$.hbase." + - "pb.SetBalancerRunningResponse\022\\\n\021IsBalan" + - "cerEnabled\022\".hbase.pb.IsBalancerEnabledR" + - "equest\032#.hbase.pb.IsBalancerEnabledRespo" + - "nse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.pb" + - ".SetSplitOrMergeEnabledRequest\032(.hbase.p" + - "b.SetSplitOrMergeEnabledResponse\022h\n\025IsSp" + - "litOrMergeEnabled\022&.hbase.pb.IsSplitOrMe", - "rgeEnabledRequest\032\'.hbase.pb.IsSplitOrMe" + - "rgeEnabledResponse\022D\n\tNormalize\022\032.hbase." + - "pb.NormalizeRequest\032\033.hbase.pb.Normalize" + - "Response\022e\n\024SetNormalizerRunning\022%.hbase" + - ".pb.SetNormalizerRunningRequest\032&.hbase." + - "pb.SetNormalizerRunningResponse\022b\n\023IsNor" + - "malizerEnabled\022$.hbase.pb.IsNormalizerEn" + - "abledRequest\032%.hbase.pb.IsNormalizerEnab" + - "ledResponse\022S\n\016RunCatalogScan\022\037.hbase.pb" + - ".RunCatalogScanRequest\032 .hbase.pb.RunCat", - "alogScanResponse\022e\n\024EnableCatalogJanitor" + - "\022%.hbase.pb.EnableCatalogJanitorRequest\032" + - "&.hbase.pb.EnableCatalogJanitorResponse\022" + - "n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.Is" + - "CatalogJanitorEnabledRequest\032).hbase.pb." + - "IsCatalogJanitorEnabledResponse\022^\n\021ExecM" + - "asterService\022#.hbase.pb.CoprocessorServi" + - "ceRequest\032$.hbase.pb.CoprocessorServiceR" + - "esponse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotR" + - "equest\032\032.hbase.pb.SnapshotResponse\022h\n\025Ge", - "tCompletedSnapshots\022&.hbase.pb.GetComple" + - "tedSnapshotsRequest\032\'.hbase.pb.GetComple" + - "tedSnapshotsResponse\022S\n\016DeleteSnapshot\022\037" + - ".hbase.pb.DeleteSnapshotRequest\032 .hbase." 
+ - "pb.DeleteSnapshotResponse\022S\n\016IsSnapshotD" + - "one\022\037.hbase.pb.IsSnapshotDoneRequest\032 .h" + - "base.pb.IsSnapshotDoneResponse\022V\n\017Restor" + - "eSnapshot\022 .hbase.pb.RestoreSnapshotRequ" + - "est\032!.hbase.pb.RestoreSnapshotResponse\022P" + - "\n\rExecProcedure\022\036.hbase.pb.ExecProcedure", - "Request\032\037.hbase.pb.ExecProcedureResponse" + - "\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exec" + + "onse\"\210\001\n\030MergeTableRegionsRequest\022)\n\006reg" + + "ion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010" + + "forcible\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004 \001" + + "(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\",\n\031MergeTableReg" + + "ionsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023AssignR" + + "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." + + "RegionSpecifier\"\026\n\024AssignRegionResponse\"" + + "X\n\025UnassignRegionRequest\022)\n\006region\030\001 \002(\013" + + "2\031.hbase.pb.RegionSpecifier\022\024\n\005force\030\002 \001", + "(\010:\005false\"\030\n\026UnassignRegionResponse\"A\n\024O" + + "fflineRegionRequest\022)\n\006region\030\001 \002(\0132\031.hb" + + "ase.pb.RegionSpecifier\"\027\n\025OfflineRegionR" + + "esponse\"\177\n\022CreateTableRequest\022+\n\014table_s" + + "chema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\nsp" + + "lit_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" + + "\n\005nonce\030\004 \001(\004:\0010\"&\n\023CreateTableResponse\022" + + "\017\n\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest\022\'" + + "\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022" + + "\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\001", + "0\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 \001(" + + "\004\"\207\001\n\024TruncateTableRequest\022&\n\ttableName\030" + + "\001 \002(\0132\023.hbase.pb.TableName\022\035\n\016preserveSp" + + "lits\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004:\001" + + "0\022\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableResp" + + "onse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableRequ" + + "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" + + "Name\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 " + + "\001(\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_id" + + "\030\001 \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable_n", + "ame\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_" + + "group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Disa" + + "bleTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022Mo" + + "difyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.h" + + "base.pb.TableName\022+\n\014table_schema\030\002 \002(\0132" + + "\025.hbase.pb.TableSchema\022\026\n\013nonce_group\030\003 " + + "\001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTableR" + + "esponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateNamesp" + + "aceRequest\022:\n\023namespaceDescriptor\030\001 \002(\0132" + + "\035.hbase.pb.NamespaceDescriptor\022\026\n\013nonce_", + "group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Crea" + + "teNamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026" + + "DeleteNamespaceRequest\022\025\n\rnamespaceName\030" + + "\001 \002(\t\022\026\n\013nonce_group\030\002 
\001(\004:\0010\022\020\n\005nonce\030\003" + + " \001(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007pr" + + "oc_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022:\n" + + "\023namespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Na" + + "mespaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\001" + + "0\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespaceRe" + + "sponse\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespaceD", + "escriptorRequest\022\025\n\rnamespaceName\030\001 \002(\t\"" + + "\\\n\036GetNamespaceDescriptorResponse\022:\n\023nam" + + "espaceDescriptor\030\001 \002(\0132\035.hbase.pb.Namesp" + + "aceDescriptor\"!\n\037ListNamespaceDescriptor" + + "sRequest\"^\n ListNamespaceDescriptorsResp" + + "onse\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hbas" + + "e.pb.NamespaceDescriptor\"?\n&ListTableDes" + + "criptorsByNamespaceRequest\022\025\n\rnamespaceN" + + "ame\030\001 \002(\t\"U\n\'ListTableDescriptorsByNames" + + "paceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hbas", + "e.pb.TableSchema\"9\n ListTableNamesByName" + + "spaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n!L" + + "istTableNamesByNamespaceResponse\022&\n\ttabl" + + "eName\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Shut" + + "downRequest\"\022\n\020ShutdownResponse\"\023\n\021StopM" + + "asterRequest\"\024\n\022StopMasterResponse\"\034\n\032Is" + + "InMaintenanceModeRequest\"8\n\033IsInMaintena" + + "nceModeResponse\022\031\n\021inMaintenanceMode\030\001 \002" + + "(\010\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n\017B" + + "alanceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031", + "SetBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n" + + "\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRunning" + + "Response\022\032\n\022prev_balance_value\030\001 \001(\010\"\032\n\030" + + "IsBalancerEnabledRequest\",\n\031IsBalancerEn" + + "abledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetSpl" + + "itOrMergeEnabledRequest\022\017\n\007enabled\030\001 \002(\010" + + "\022\023\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030\003 " + + "\003(\0162\032.hbase.pb.MasterSwitchType\"4\n\036SetSp" + + "litOrMergeEnabledResponse\022\022\n\nprev_value\030" + + "\001 \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022/\n", + "\013switch_type\030\001 \002(\0162\032.hbase.pb.MasterSwit" + + "chType\"0\n\035IsSplitOrMergeEnabledResponse\022" + + "\017\n\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+\n\021" + + "NormalizeResponse\022\026\n\016normalizer_ran\030\001 \002(" + + "\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on\030\001" + + " \002(\010\"=\n\034SetNormalizerRunningResponse\022\035\n\025" + + "prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNormali" + + "zerEnabledRequest\".\n\033IsNormalizerEnabled" + + "Response\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogS" + + "canRequest\"-\n\026RunCatalogScanResponse\022\023\n\013", + "scan_result\030\001 \001(\005\"-\n\033EnableCatalogJanito" + + "rRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalo" + + "gJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036" + + "IsCatalogJanitorEnabledRequest\"0\n\037IsCata" + + "logJanitorEnabledResponse\022\r\n\005value\030\001 \002(\010" + + "\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035." 
+ + "hbase.pb.SnapshotDescription\",\n\020Snapshot" + + "Response\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034Ge" + + "tCompletedSnapshotsRequest\"Q\n\035GetComplet" + + "edSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\0132\035", + ".hbase.pb.SnapshotDescription\"H\n\025DeleteS" + + "napshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase" + + ".pb.SnapshotDescription\"\030\n\026DeleteSnapsho" + + "tResponse\"s\n\026RestoreSnapshotRequest\022/\n\010s" + + "napshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescrip" + + "tion\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 " + + "\001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007pro" + + "c_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n\010s" + + "napshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescrip" + + "tion\"^\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001", + " \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb" + + ".SnapshotDescription\"O\n\034IsRestoreSnapsho" + + "tDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.p" + + "b.SnapshotDescription\"4\n\035IsRestoreSnapsh" + + "otDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033G" + + "etSchemaAlterStatusRequest\022\'\n\ntable_name" + + "\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSchema" + + "AlterStatusResponse\022\035\n\025yet_to_update_reg" + + "ions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032Ge" + + "tTableDescriptorsRequest\022(\n\013table_names\030", + "\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 \001(" + + "\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021\n\t" + + "namespace\030\004 \001(\t\"J\n\033GetTableDescriptorsRe" + + "sponse\022+\n\014table_schema\030\001 \003(\0132\025.hbase.pb." 
+ + "TableSchema\"[\n\024GetTableNamesRequest\022\r\n\005r" + + "egex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005" + + "false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableName" + + "sResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase.p" + + "b.TableName\"?\n\024GetTableStateRequest\022\'\n\nt" + + "able_name\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025", + "GetTableStateResponse\022)\n\013table_state\030\001 \002" + + "(\0132\024.hbase.pb.TableState\"\031\n\027GetClusterSt" + + "atusRequest\"K\n\030GetClusterStatusResponse\022" + + "/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clust" + + "erStatus\"\030\n\026IsMasterRunningRequest\"4\n\027Is" + + "MasterRunningResponse\022\031\n\021is_master_runni" + + "ng\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tproc" + + "edure\030\001 \002(\0132\036.hbase.pb.ProcedureDescript" + + "ion\"F\n\025ExecProcedureResponse\022\030\n\020expected" + + "_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026I", + "sProcedureDoneRequest\0221\n\tprocedure\030\001 \001(\013" + + "2\036.hbase.pb.ProcedureDescription\"`\n\027IsPr" + + "ocedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005false" + + "\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.ProcedureD" + + "escription\",\n\031GetProcedureResultRequest\022" + + "\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultR" + + "esponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetPro" + + "cedureResultResponse.State\022\022\n\nstart_time" + + "\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 " + + "\001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Foreig", + "nExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020" + + "\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortPro" + + "cedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInt" + + "erruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProc" + + "edureResponse\022\034\n\024is_procedure_aborted\030\001 " + + "\002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListProc" + + "eduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbas" + + "e.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tus" + + "er_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnam" + + "espace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase", + ".pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016byp" + + "ass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hb" + + "ase.pb.ThrottleRequest\"\022\n\020SetQuotaRespon" + + "se\"J\n\037MajorCompactionTimestampRequest\022\'\n" + + "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"U" + + "\n(MajorCompactionTimestampForRegionReque" + + "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" + + "ifier\"@\n MajorCompactionTimestampRespons" + + "e\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Secur" + + "ityCapabilitiesRequest\"\354\001\n\034SecurityCapab", + "ilitiesResponse\022G\n\014capabilities\030\001 \003(\01621." 
+ + "hbase.pb.SecurityCapabilitiesResponse.Ca" + + "pability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHEN" + + "TICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n" + + "\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003" + + "\022\023\n\017CELL_VISIBILITY\020\004\"\"\n ListDrainingReg" + + "ionServersRequest\"N\n!ListDrainingRegionS" + + "erversResponse\022)\n\013server_name\030\001 \003(\0132\024.hb" + + "ase.pb.ServerName\"F\n\031DrainRegionServersR" + + "equest\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.S", + "erverName\"\034\n\032DrainRegionServersResponse\"" + + "P\n#RemoveDrainFromRegionServersRequest\022)" + + "\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerNam" + + "e\"&\n$RemoveDrainFromRegionServersRespons" + + "e*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERG" + + "E\020\0012\2221\n\rMasterService\022e\n\024GetSchemaAlterS" + + "tatus\022%.hbase.pb.GetSchemaAlterStatusReq" + + "uest\032&.hbase.pb.GetSchemaAlterStatusResp" + + "onse\022b\n\023GetTableDescriptors\022$.hbase.pb.G" + + "etTableDescriptorsRequest\032%.hbase.pb.Get", + "TableDescriptorsResponse\022P\n\rGetTableName" + + "s\022\036.hbase.pb.GetTableNamesRequest\032\037.hbas" + + "e.pb.GetTableNamesResponse\022Y\n\020GetCluster" + + "Status\022!.hbase.pb.GetClusterStatusReques" + + "t\032\".hbase.pb.GetClusterStatusResponse\022V\n" + + "\017IsMasterRunning\022 .hbase.pb.IsMasterRunn" + + "ingRequest\032!.hbase.pb.IsMasterRunningRes" + + "ponse\022D\n\tAddColumn\022\032.hbase.pb.AddColumnR" + + "equest\032\033.hbase.pb.AddColumnResponse\022M\n\014D" + + "eleteColumn\022\035.hbase.pb.DeleteColumnReque", + "st\032\036.hbase.pb.DeleteColumnResponse\022M\n\014Mo" + + "difyColumn\022\035.hbase.pb.ModifyColumnReques" + + "t\032\036.hbase.pb.ModifyColumnResponse\022G\n\nMov" + + "eRegion\022\033.hbase.pb.MoveRegionRequest\032\034.h" + + "base.pb.MoveRegionResponse\022\\\n\021MergeTable" + + "Regions\022\".hbase.pb.MergeTableRegionsRequ" + + "est\032#.hbase.pb.MergeTableRegionsResponse" + + "\022M\n\014AssignRegion\022\035.hbase.pb.AssignRegion" + + "Request\032\036.hbase.pb.AssignRegionResponse\022" + + "S\n\016UnassignRegion\022\037.hbase.pb.UnassignReg", + "ionRequest\032 .hbase.pb.UnassignRegionResp" + + "onse\022P\n\rOfflineRegion\022\036.hbase.pb.Offline" + + "RegionRequest\032\037.hbase.pb.OfflineRegionRe" + + "sponse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteT" + + "ableRequest\032\035.hbase.pb.DeleteTableRespon" + + "se\022P\n\rtruncateTable\022\036.hbase.pb.TruncateT" + + "ableRequest\032\037.hbase.pb.TruncateTableResp" + + "onse\022J\n\013EnableTable\022\034.hbase.pb.EnableTab" + + "leRequest\032\035.hbase.pb.EnableTableResponse" + + "\022M\n\014DisableTable\022\035.hbase.pb.DisableTable", + "Request\032\036.hbase.pb.DisableTableResponse\022" + + "J\n\013ModifyTable\022\034.hbase.pb.ModifyTableReq" + + "uest\032\035.hbase.pb.ModifyTableResponse\022J\n\013C" + + "reateTable\022\034.hbase.pb.CreateTableRequest" + + "\032\035.hbase.pb.CreateTableResponse\022A\n\010Shutd" + + "own\022\031.hbase.pb.ShutdownRequest\032\032.hbase.p" + + "b.ShutdownResponse\022G\n\nStopMaster\022\033.hbase" + + ".pb.StopMasterRequest\032\034.hbase.pb.StopMas" + + "terResponse\022h\n\031IsMasterInMaintenanceMode" + + "\022$.hbase.pb.IsInMaintenanceModeRequest\032%", + ".hbase.pb.IsInMaintenanceModeResponse\022>\n" + + "\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hba" + + "se.pb.BalanceResponse\022_\n\022SetBalancerRunn" + + 
"ing\022#.hbase.pb.SetBalancerRunningRequest" + + "\032$.hbase.pb.SetBalancerRunningResponse\022\\" + + "\n\021IsBalancerEnabled\022\".hbase.pb.IsBalance" + + "rEnabledRequest\032#.hbase.pb.IsBalancerEna" + + "bledResponse\022k\n\026SetSplitOrMergeEnabled\022\'" + + ".hbase.pb.SetSplitOrMergeEnabledRequest\032" + + "(.hbase.pb.SetSplitOrMergeEnabledRespons", + "e\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.Is" + + "SplitOrMergeEnabledRequest\032\'.hbase.pb.Is" + + "SplitOrMergeEnabledResponse\022D\n\tNormalize" + + "\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb." + + "NormalizeResponse\022e\n\024SetNormalizerRunnin" + + "g\022%.hbase.pb.SetNormalizerRunningRequest" + + "\032&.hbase.pb.SetNormalizerRunningResponse" + + "\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNor" + + "malizerEnabledRequest\032%.hbase.pb.IsNorma" + + "lizerEnabledResponse\022S\n\016RunCatalogScan\022\037", + ".hbase.pb.RunCatalogScanRequest\032 .hbase." + + "pb.RunCatalogScanResponse\022e\n\024EnableCatal" + + "ogJanitor\022%.hbase.pb.EnableCatalogJanito" + + "rRequest\032&.hbase.pb.EnableCatalogJanitor" + + "Response\022n\n\027IsCatalogJanitorEnabled\022(.hb" + + "ase.pb.IsCatalogJanitorEnabledRequest\032)." + + "hbase.pb.IsCatalogJanitorEnabledResponse" + + "\022^\n\021ExecMasterService\022#.hbase.pb.Coproce" + + "ssorServiceRequest\032$.hbase.pb.Coprocesso" + + "rServiceResponse\022A\n\010Snapshot\022\031.hbase.pb.", + "SnapshotRequest\032\032.hbase.pb.SnapshotRespo" + + "nse\022h\n\025GetCompletedSnapshots\022&.hbase.pb." + + "GetCompletedSnapshotsRequest\032\'.hbase.pb." + + "GetCompletedSnapshotsResponse\022S\n\016DeleteS" + + "napshot\022\037.hbase.pb.DeleteSnapshotRequest" + + "\032 .hbase.pb.DeleteSnapshotResponse\022S\n\016Is" + + "SnapshotDone\022\037.hbase.pb.IsSnapshotDoneRe" + + "quest\032 .hbase.pb.IsSnapshotDoneResponse\022" + + "V\n\017RestoreSnapshot\022 .hbase.pb.RestoreSna" + + "pshotRequest\032!.hbase.pb.RestoreSnapshotR", + "esponse\022P\n\rExecProcedure\022\036.hbase.pb.Exec" + "ProcedureRequest\032\037.hbase.pb.ExecProcedur" + - "eResponse\022V\n\017IsProcedureDone\022 .hbase.pb." + - "IsProcedureDoneRequest\032!.hbase.pb.IsProc" + - "edureDoneResponse\022V\n\017ModifyNamespace\022 .h" + - "base.pb.ModifyNamespaceRequest\032!.hbase.p" + - "b.ModifyNamespaceResponse\022V\n\017CreateNames" + - "pace\022 .hbase.pb.CreateNamespaceRequest\032!" + - ".hbase.pb.CreateNamespaceResponse\022V\n\017Del", - "eteNamespace\022 .hbase.pb.DeleteNamespaceR" + - "equest\032!.hbase.pb.DeleteNamespaceRespons" + - "e\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb.G" + - "etNamespaceDescriptorRequest\032(.hbase.pb." + - "GetNamespaceDescriptorResponse\022q\n\030ListNa" + - "mespaceDescriptors\022).hbase.pb.ListNamesp" + - "aceDescriptorsRequest\032*.hbase.pb.ListNam" + - "espaceDescriptorsResponse\022\206\001\n\037ListTableD" + - "escriptorsByNamespace\0220.hbase.pb.ListTab" + - "leDescriptorsByNamespaceRequest\0321.hbase.", - "pb.ListTableDescriptorsByNamespaceRespon" + - "se\022t\n\031ListTableNamesByNamespace\022*.hbase." 
+ - "pb.ListTableNamesByNamespaceRequest\032+.hb" + - "ase.pb.ListTableNamesByNamespaceResponse" + - "\022P\n\rGetTableState\022\036.hbase.pb.GetTableSta" + - "teRequest\032\037.hbase.pb.GetTableStateRespon" + - "se\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReques" + - "t\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLast" + - "MajorCompactionTimestamp\022).hbase.pb.Majo" + - "rCompactionTimestampRequest\032*.hbase.pb.M", - "ajorCompactionTimestampResponse\022\212\001\n(getL" + - "astMajorCompactionTimestampForRegion\0222.h" + - "base.pb.MajorCompactionTimestampForRegio" + - "nRequest\032*.hbase.pb.MajorCompactionTimes" + - "tampResponse\022_\n\022getProcedureResult\022#.hba" + - "se.pb.GetProcedureResultRequest\032$.hbase." + - "pb.GetProcedureResultResponse\022h\n\027getSecu" + - "rityCapabilities\022%.hbase.pb.SecurityCapa" + - "bilitiesRequest\032&.hbase.pb.SecurityCapab" + - "ilitiesResponse\022S\n\016AbortProcedure\022\037.hbas", - "e.pb.AbortProcedureRequest\032 .hbase.pb.Ab" + - "ortProcedureResponse\022S\n\016ListProcedures\022\037" + - ".hbase.pb.ListProceduresRequest\032 .hbase." + - "pb.ListProceduresResponse\022_\n\022AddReplicat" + - "ionPeer\022#.hbase.pb.AddReplicationPeerReq" + - "uest\032$.hbase.pb.AddReplicationPeerRespon" + - "se\022h\n\025RemoveReplicationPeer\022&.hbase.pb.R" + - "emoveReplicationPeerRequest\032\'.hbase.pb.R" + - "emoveReplicationPeerResponse\022h\n\025EnableRe" + - "plicationPeer\022&.hbase.pb.EnableReplicati", - "onPeerRequest\032\'.hbase.pb.EnableReplicati" + - "onPeerResponse\022k\n\026DisableReplicationPeer" + - "\022\'.hbase.pb.DisableReplicationPeerReques" + - "t\032(.hbase.pb.DisableReplicationPeerRespo" + - "nse\022q\n\030GetReplicationPeerConfig\022).hbase." + - "pb.GetReplicationPeerConfigRequest\032*.hba" + - "se.pb.GetReplicationPeerConfigResponse\022z" + - "\n\033UpdateReplicationPeerConfig\022,.hbase.pb" + - ".UpdateReplicationPeerConfigRequest\032-.hb" + - "ase.pb.UpdateReplicationPeerConfigRespon", - "se\022e\n\024ListReplicationPeers\022%.hbase.pb.Li" + - "stReplicationPeersRequest\032&.hbase.pb.Lis" + - "tReplicationPeersResponse\022t\n\031listDrainin" + - "gRegionServers\022*.hbase.pb.ListDrainingRe" + - "gionServersRequest\032+.hbase.pb.ListDraini" + - "ngRegionServersResponse\022_\n\022drainRegionSe" + - "rvers\022#.hbase.pb.DrainRegionServersReque" + - "st\032$.hbase.pb.DrainRegionServersResponse" + - "\022}\n\034removeDrainFromRegionServers\022-.hbase" + - ".pb.RemoveDrainFromRegionServersRequest\032", - "..hbase.pb.RemoveDrainFromRegionServersR" + - "esponseBI\n1org.apache.hadoop.hbase.shade" + - "d.protobuf.generatedB\014MasterProtosH\001\210\001\001\240" + - "\001\001" + "eResponse\022W\n\024ExecProcedureWithRet\022\036.hbas" + + "e.pb.ExecProcedureRequest\032\037.hbase.pb.Exe" + + "cProcedureResponse\022V\n\017IsProcedureDone\022 ." + + "hbase.pb.IsProcedureDoneRequest\032!.hbase." + + "pb.IsProcedureDoneResponse\022V\n\017ModifyName" + + "space\022 .hbase.pb.ModifyNamespaceRequest\032" + + "!.hbase.pb.ModifyNamespaceResponse\022V\n\017Cr" + + "eateNamespace\022 .hbase.pb.CreateNamespace", + "Request\032!.hbase.pb.CreateNamespaceRespon" + + "se\022V\n\017DeleteNamespace\022 .hbase.pb.DeleteN" + + "amespaceRequest\032!.hbase.pb.DeleteNamespa" + + "ceResponse\022k\n\026GetNamespaceDescriptor\022\'.h" + + "base.pb.GetNamespaceDescriptorRequest\032(." 
+ + "hbase.pb.GetNamespaceDescriptorResponse\022" + + "q\n\030ListNamespaceDescriptors\022).hbase.pb.L" + + "istNamespaceDescriptorsRequest\032*.hbase.p" + + "b.ListNamespaceDescriptorsResponse\022\206\001\n\037L" + + "istTableDescriptorsByNamespace\0220.hbase.p", + "b.ListTableDescriptorsByNamespaceRequest" + + "\0321.hbase.pb.ListTableDescriptorsByNamesp" + + "aceResponse\022t\n\031ListTableNamesByNamespace" + + "\022*.hbase.pb.ListTableNamesByNamespaceReq" + + "uest\032+.hbase.pb.ListTableNamesByNamespac" + + "eResponse\022P\n\rGetTableState\022\036.hbase.pb.Ge" + + "tTableStateRequest\032\037.hbase.pb.GetTableSt" + + "ateResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQu" + + "otaRequest\032\032.hbase.pb.SetQuotaResponse\022x" + + "\n\037getLastMajorCompactionTimestamp\022).hbas", + "e.pb.MajorCompactionTimestampRequest\032*.h" + + "base.pb.MajorCompactionTimestampResponse" + + "\022\212\001\n(getLastMajorCompactionTimestampForR" + + "egion\0222.hbase.pb.MajorCompactionTimestam" + + "pForRegionRequest\032*.hbase.pb.MajorCompac" + + "tionTimestampResponse\022_\n\022getProcedureRes" + + "ult\022#.hbase.pb.GetProcedureResultRequest" + + "\032$.hbase.pb.GetProcedureResultResponse\022h" + + "\n\027getSecurityCapabilities\022%.hbase.pb.Sec" + + "urityCapabilitiesRequest\032&.hbase.pb.Secu", + "rityCapabilitiesResponse\022S\n\016AbortProcedu" + + "re\022\037.hbase.pb.AbortProcedureRequest\032 .hb" + + "ase.pb.AbortProcedureResponse\022S\n\016ListPro" + + "cedures\022\037.hbase.pb.ListProceduresRequest" + + "\032 .hbase.pb.ListProceduresResponse\022_\n\022Ad" + + "dReplicationPeer\022#.hbase.pb.AddReplicati" + + "onPeerRequest\032$.hbase.pb.AddReplicationP" + + "eerResponse\022h\n\025RemoveReplicationPeer\022&.h" + + "base.pb.RemoveReplicationPeerRequest\032\'.h" + + "base.pb.RemoveReplicationPeerResponse\022h\n", + "\025EnableReplicationPeer\022&.hbase.pb.Enable" + + "ReplicationPeerRequest\032\'.hbase.pb.Enable" + + "ReplicationPeerResponse\022k\n\026DisableReplic" + + "ationPeer\022\'.hbase.pb.DisableReplicationP" + + "eerRequest\032(.hbase.pb.DisableReplication" + + "PeerResponse\022q\n\030GetReplicationPeerConfig" + + "\022).hbase.pb.GetReplicationPeerConfigRequ" + + "est\032*.hbase.pb.GetReplicationPeerConfigR" + + "esponse\022z\n\033UpdateReplicationPeerConfig\022," + + ".hbase.pb.UpdateReplicationPeerConfigReq", + "uest\032-.hbase.pb.UpdateReplicationPeerCon" + + "figResponse\022e\n\024ListReplicationPeers\022%.hb" + + "ase.pb.ListReplicationPeersRequest\032&.hba" + + "se.pb.ListReplicationPeersResponse\022t\n\031li" + + "stDrainingRegionServers\022*.hbase.pb.ListD" + + "rainingRegionServersRequest\032+.hbase.pb.L" + + "istDrainingRegionServersResponse\022_\n\022drai" + + "nRegionServers\022#.hbase.pb.DrainRegionSer" + + "versRequest\032$.hbase.pb.DrainRegionServer" + + "sResponse\022}\n\034removeDrainFromRegionServer", + "s\022-.hbase.pb.RemoveDrainFromRegionServer" + + "sRequest\032..hbase.pb.RemoveDrainFromRegio" + + "nServersResponseBI\n1org.apache.hadoop.hb" + + "ase.shaded.protobuf.generatedB\014MasterPro" + + "tosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -75629,668 +73985,656 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MoveRegionResponse_descriptor, new java.lang.String[] { }); - internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_hbase_pb_DispatchMergingRegionsRequest_fieldAccessorTable = new - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor, - new java.lang.String[] { "RegionA", "RegionB", "Forcible", "NonceGroup", "Nonce", }); - internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_hbase_pb_DispatchMergingRegionsResponse_fieldAccessorTable = new - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor, - new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_MergeTableRegionsRequest_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(8); internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MergeTableRegionsRequest_descriptor, new java.lang.String[] { "Region", "Forcible", "NonceGroup", "Nonce", }); internal_static_hbase_pb_MergeTableRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(9); internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MergeTableRegionsResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_AssignRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(10); internal_static_hbase_pb_AssignRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AssignRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_AssignRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(11); internal_static_hbase_pb_AssignRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AssignRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_UnassignRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_UnassignRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UnassignRegionRequest_descriptor, new java.lang.String[] { "Region", "Force", }); internal_static_hbase_pb_UnassignRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(13); internal_static_hbase_pb_UnassignRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_hbase_pb_UnassignRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_OfflineRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_OfflineRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_OfflineRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_OfflineRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_OfflineRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_OfflineRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_CreateTableRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_CreateTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateTableRequest_descriptor, new java.lang.String[] { "TableSchema", "SplitKeys", "NonceGroup", "Nonce", }); internal_static_hbase_pb_CreateTableResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_CreateTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_DeleteTableRequest_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_DeleteTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteTableRequest_descriptor, new java.lang.String[] { "TableName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_DeleteTableResponse_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_DeleteTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_TruncateTableRequest_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_TruncateTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_TruncateTableRequest_descriptor, new java.lang.String[] { "TableName", "PreserveSplits", "NonceGroup", "Nonce", }); internal_static_hbase_pb_TruncateTableResponse_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_TruncateTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_TruncateTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_EnableTableRequest_descriptor = - getDescriptor().getMessageTypes().get(24); + 
getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_EnableTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableTableRequest_descriptor, new java.lang.String[] { "TableName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_EnableTableResponse_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_EnableTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_DisableTableRequest_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_DisableTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DisableTableRequest_descriptor, new java.lang.String[] { "TableName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_DisableTableResponse_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(25); internal_static_hbase_pb_DisableTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DisableTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_ModifyTableRequest_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(26); internal_static_hbase_pb_ModifyTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyTableRequest_descriptor, new java.lang.String[] { "TableName", "TableSchema", "NonceGroup", "Nonce", }); internal_static_hbase_pb_ModifyTableResponse_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(27); internal_static_hbase_pb_ModifyTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_CreateNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(28); internal_static_hbase_pb_CreateNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceDescriptor", "NonceGroup", "Nonce", }); internal_static_hbase_pb_CreateNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(29); internal_static_hbase_pb_CreateNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateNamespaceResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_DeleteNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(30); internal_static_hbase_pb_DeleteNamespaceRequest_fieldAccessorTable = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_DeleteNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(31); internal_static_hbase_pb_DeleteNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteNamespaceResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_ModifyNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(34); + getDescriptor().getMessageTypes().get(32); internal_static_hbase_pb_ModifyNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceDescriptor", "NonceGroup", "Nonce", }); internal_static_hbase_pb_ModifyNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(35); + getDescriptor().getMessageTypes().get(33); internal_static_hbase_pb_ModifyNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyNamespaceResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetNamespaceDescriptorRequest_descriptor = - getDescriptor().getMessageTypes().get(36); + getDescriptor().getMessageTypes().get(34); internal_static_hbase_pb_GetNamespaceDescriptorRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetNamespaceDescriptorRequest_descriptor, new java.lang.String[] { "NamespaceName", }); internal_static_hbase_pb_GetNamespaceDescriptorResponse_descriptor = - getDescriptor().getMessageTypes().get(37); + getDescriptor().getMessageTypes().get(35); internal_static_hbase_pb_GetNamespaceDescriptorResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetNamespaceDescriptorResponse_descriptor, new java.lang.String[] { "NamespaceDescriptor", }); internal_static_hbase_pb_ListNamespaceDescriptorsRequest_descriptor = - getDescriptor().getMessageTypes().get(38); + getDescriptor().getMessageTypes().get(36); internal_static_hbase_pb_ListNamespaceDescriptorsRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListNamespaceDescriptorsRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListNamespaceDescriptorsResponse_descriptor = - getDescriptor().getMessageTypes().get(39); + getDescriptor().getMessageTypes().get(37); internal_static_hbase_pb_ListNamespaceDescriptorsResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListNamespaceDescriptorsResponse_descriptor, new java.lang.String[] { "NamespaceDescriptor", }); internal_static_hbase_pb_ListTableDescriptorsByNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(40); + getDescriptor().getMessageTypes().get(38); internal_static_hbase_pb_ListTableDescriptorsByNamespaceRequest_fieldAccessorTable = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableDescriptorsByNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceName", }); internal_static_hbase_pb_ListTableDescriptorsByNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(41); + getDescriptor().getMessageTypes().get(39); internal_static_hbase_pb_ListTableDescriptorsByNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableDescriptorsByNamespaceResponse_descriptor, new java.lang.String[] { "TableSchema", }); internal_static_hbase_pb_ListTableNamesByNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(42); + getDescriptor().getMessageTypes().get(40); internal_static_hbase_pb_ListTableNamesByNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableNamesByNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceName", }); internal_static_hbase_pb_ListTableNamesByNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(43); + getDescriptor().getMessageTypes().get(41); internal_static_hbase_pb_ListTableNamesByNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableNamesByNamespaceResponse_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_ShutdownRequest_descriptor = - getDescriptor().getMessageTypes().get(44); + getDescriptor().getMessageTypes().get(42); internal_static_hbase_pb_ShutdownRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ShutdownRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ShutdownResponse_descriptor = - getDescriptor().getMessageTypes().get(45); + getDescriptor().getMessageTypes().get(43); internal_static_hbase_pb_ShutdownResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ShutdownResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_StopMasterRequest_descriptor = - getDescriptor().getMessageTypes().get(46); + getDescriptor().getMessageTypes().get(44); internal_static_hbase_pb_StopMasterRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopMasterRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_StopMasterResponse_descriptor = - getDescriptor().getMessageTypes().get(47); + getDescriptor().getMessageTypes().get(45); internal_static_hbase_pb_StopMasterResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopMasterResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor = - getDescriptor().getMessageTypes().get(48); + getDescriptor().getMessageTypes().get(46); internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor, new java.lang.String[] { }); 
internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor = - getDescriptor().getMessageTypes().get(49); + getDescriptor().getMessageTypes().get(47); internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor, new java.lang.String[] { "InMaintenanceMode", }); internal_static_hbase_pb_BalanceRequest_descriptor = - getDescriptor().getMessageTypes().get(50); + getDescriptor().getMessageTypes().get(48); internal_static_hbase_pb_BalanceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_BalanceRequest_descriptor, new java.lang.String[] { "Force", }); internal_static_hbase_pb_BalanceResponse_descriptor = - getDescriptor().getMessageTypes().get(51); + getDescriptor().getMessageTypes().get(49); internal_static_hbase_pb_BalanceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_BalanceResponse_descriptor, new java.lang.String[] { "BalancerRan", }); internal_static_hbase_pb_SetBalancerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(52); + getDescriptor().getMessageTypes().get(50); internal_static_hbase_pb_SetBalancerRunningRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetBalancerRunningRequest_descriptor, new java.lang.String[] { "On", "Synchronous", }); internal_static_hbase_pb_SetBalancerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(53); + getDescriptor().getMessageTypes().get(51); internal_static_hbase_pb_SetBalancerRunningResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetBalancerRunningResponse_descriptor, new java.lang.String[] { "PrevBalanceValue", }); internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(54); + getDescriptor().getMessageTypes().get(52); internal_static_hbase_pb_IsBalancerEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(55); + getDescriptor().getMessageTypes().get(53); internal_static_hbase_pb_IsBalancerEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(56); + getDescriptor().getMessageTypes().get(54); internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor, new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", }); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(57); + getDescriptor().getMessageTypes().get(55); 
internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(58); + getDescriptor().getMessageTypes().get(56); internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor, new java.lang.String[] { "SwitchType", }); internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(59); + getDescriptor().getMessageTypes().get(57); internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_NormalizeRequest_descriptor = - getDescriptor().getMessageTypes().get(60); + getDescriptor().getMessageTypes().get(58); internal_static_hbase_pb_NormalizeRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_NormalizeRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_NormalizeResponse_descriptor = - getDescriptor().getMessageTypes().get(61); + getDescriptor().getMessageTypes().get(59); internal_static_hbase_pb_NormalizeResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_NormalizeResponse_descriptor, new java.lang.String[] { "NormalizerRan", }); internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(62); + getDescriptor().getMessageTypes().get(60); internal_static_hbase_pb_SetNormalizerRunningRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor, new java.lang.String[] { "On", }); internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(63); + getDescriptor().getMessageTypes().get(61); internal_static_hbase_pb_SetNormalizerRunningResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor, new java.lang.String[] { "PrevNormalizerValue", }); internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(64); + getDescriptor().getMessageTypes().get(62); internal_static_hbase_pb_IsNormalizerEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(65); + getDescriptor().getMessageTypes().get(63); internal_static_hbase_pb_IsNormalizerEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
         internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor,
         new java.lang.String[] { "Enabled", });
     internal_static_hbase_pb_RunCatalogScanRequest_descriptor =
-      getDescriptor().getMessageTypes().get(66);
+      getDescriptor().getMessageTypes().get(64);
     internal_static_hbase_pb_RunCatalogScanRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RunCatalogScanRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_RunCatalogScanResponse_descriptor =
-      getDescriptor().getMessageTypes().get(67);
+      getDescriptor().getMessageTypes().get(65);
     internal_static_hbase_pb_RunCatalogScanResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RunCatalogScanResponse_descriptor,
         new java.lang.String[] { "ScanResult", });
     internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor =
-      getDescriptor().getMessageTypes().get(68);
+      getDescriptor().getMessageTypes().get(66);
     internal_static_hbase_pb_EnableCatalogJanitorRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor,
         new java.lang.String[] { "Enable", });
     internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor =
-      getDescriptor().getMessageTypes().get(69);
+      getDescriptor().getMessageTypes().get(67);
     internal_static_hbase_pb_EnableCatalogJanitorResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor,
         new java.lang.String[] { "PrevValue", });
     internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor =
-      getDescriptor().getMessageTypes().get(70);
+      getDescriptor().getMessageTypes().get(68);
     internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor =
-      getDescriptor().getMessageTypes().get(71);
+      getDescriptor().getMessageTypes().get(69);
     internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor,
         new java.lang.String[] { "Value", });
     internal_static_hbase_pb_SnapshotRequest_descriptor =
-      getDescriptor().getMessageTypes().get(72);
+      getDescriptor().getMessageTypes().get(70);
     internal_static_hbase_pb_SnapshotRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SnapshotRequest_descriptor,
         new java.lang.String[] { "Snapshot", });
     internal_static_hbase_pb_SnapshotResponse_descriptor =
-      getDescriptor().getMessageTypes().get(73);
+      getDescriptor().getMessageTypes().get(71);
     internal_static_hbase_pb_SnapshotResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SnapshotResponse_descriptor,
         new java.lang.String[] { "ExpectedTimeout", });
     internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor =
-      getDescriptor().getMessageTypes().get(74);
+      getDescriptor().getMessageTypes().get(72);
     internal_static_hbase_pb_GetCompletedSnapshotsRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor =
-      getDescriptor().getMessageTypes().get(75);
+      getDescriptor().getMessageTypes().get(73);
     internal_static_hbase_pb_GetCompletedSnapshotsResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor,
         new java.lang.String[] { "Snapshots", });
     internal_static_hbase_pb_DeleteSnapshotRequest_descriptor =
-      getDescriptor().getMessageTypes().get(76);
+      getDescriptor().getMessageTypes().get(74);
     internal_static_hbase_pb_DeleteSnapshotRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_DeleteSnapshotRequest_descriptor,
         new java.lang.String[] { "Snapshot", });
     internal_static_hbase_pb_DeleteSnapshotResponse_descriptor =
-      getDescriptor().getMessageTypes().get(77);
+      getDescriptor().getMessageTypes().get(75);
     internal_static_hbase_pb_DeleteSnapshotResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_DeleteSnapshotResponse_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_RestoreSnapshotRequest_descriptor =
-      getDescriptor().getMessageTypes().get(78);
+      getDescriptor().getMessageTypes().get(76);
     internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RestoreSnapshotRequest_descriptor,
         new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", });
     internal_static_hbase_pb_RestoreSnapshotResponse_descriptor =
-      getDescriptor().getMessageTypes().get(79);
+      getDescriptor().getMessageTypes().get(77);
     internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RestoreSnapshotResponse_descriptor,
         new java.lang.String[] { "ProcId", });
     internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor =
-      getDescriptor().getMessageTypes().get(80);
+      getDescriptor().getMessageTypes().get(78);
     internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor,
         new java.lang.String[] { "Snapshot", });
     internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor =
-      getDescriptor().getMessageTypes().get(81);
+      getDescriptor().getMessageTypes().get(79);
     internal_static_hbase_pb_IsSnapshotDoneResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor,
         new java.lang.String[] { "Done", "Snapshot", });
     internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor =
-      getDescriptor().getMessageTypes().get(82);
+      getDescriptor().getMessageTypes().get(80);
     internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor,
         new java.lang.String[] { "Snapshot", });
     internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor =
-      getDescriptor().getMessageTypes().get(83);
+      getDescriptor().getMessageTypes().get(81);
     internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor,
         new java.lang.String[] { "Done", });
     internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor =
-      getDescriptor().getMessageTypes().get(84);
+      getDescriptor().getMessageTypes().get(82);
     internal_static_hbase_pb_GetSchemaAlterStatusRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor,
         new java.lang.String[] { "TableName", });
     internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor =
-      getDescriptor().getMessageTypes().get(85);
+      getDescriptor().getMessageTypes().get(83);
     internal_static_hbase_pb_GetSchemaAlterStatusResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor,
         new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", });
     internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor =
-      getDescriptor().getMessageTypes().get(86);
+      getDescriptor().getMessageTypes().get(84);
     internal_static_hbase_pb_GetTableDescriptorsRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor,
         new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", });
     internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor =
-      getDescriptor().getMessageTypes().get(87);
+      getDescriptor().getMessageTypes().get(85);
     internal_static_hbase_pb_GetTableDescriptorsResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor,
         new java.lang.String[] { "TableSchema", });
     internal_static_hbase_pb_GetTableNamesRequest_descriptor =
-      getDescriptor().getMessageTypes().get(88);
+      getDescriptor().getMessageTypes().get(86);
     internal_static_hbase_pb_GetTableNamesRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetTableNamesRequest_descriptor,
         new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", });
     internal_static_hbase_pb_GetTableNamesResponse_descriptor =
-      getDescriptor().getMessageTypes().get(89);
+      getDescriptor().getMessageTypes().get(87);
     internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetTableNamesResponse_descriptor,
         new java.lang.String[] { "TableNames", });
     internal_static_hbase_pb_GetTableStateRequest_descriptor =
-      getDescriptor().getMessageTypes().get(90);
+      getDescriptor().getMessageTypes().get(88);
     internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetTableStateRequest_descriptor,
         new java.lang.String[] { "TableName", });
     internal_static_hbase_pb_GetTableStateResponse_descriptor =
-      getDescriptor().getMessageTypes().get(91);
+      getDescriptor().getMessageTypes().get(89);
     internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetTableStateResponse_descriptor,
         new java.lang.String[] { "TableState", });
     internal_static_hbase_pb_GetClusterStatusRequest_descriptor =
-      getDescriptor().getMessageTypes().get(92);
+      getDescriptor().getMessageTypes().get(90);
     internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetClusterStatusRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_GetClusterStatusResponse_descriptor =
-      getDescriptor().getMessageTypes().get(93);
+      getDescriptor().getMessageTypes().get(91);
     internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetClusterStatusResponse_descriptor,
         new java.lang.String[] { "ClusterStatus", });
     internal_static_hbase_pb_IsMasterRunningRequest_descriptor =
-      getDescriptor().getMessageTypes().get(94);
+      getDescriptor().getMessageTypes().get(92);
     internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsMasterRunningRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_IsMasterRunningResponse_descriptor =
-      getDescriptor().getMessageTypes().get(95);
+      getDescriptor().getMessageTypes().get(93);
     internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsMasterRunningResponse_descriptor,
         new java.lang.String[] { "IsMasterRunning", });
     internal_static_hbase_pb_ExecProcedureRequest_descriptor =
-      getDescriptor().getMessageTypes().get(96);
+      getDescriptor().getMessageTypes().get(94);
     internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ExecProcedureRequest_descriptor,
         new java.lang.String[] { "Procedure", });
     internal_static_hbase_pb_ExecProcedureResponse_descriptor =
-      getDescriptor().getMessageTypes().get(97);
+      getDescriptor().getMessageTypes().get(95);
     internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ExecProcedureResponse_descriptor,
         new java.lang.String[] { "ExpectedTimeout", "ReturnData", });
     internal_static_hbase_pb_IsProcedureDoneRequest_descriptor =
-      getDescriptor().getMessageTypes().get(98);
+      getDescriptor().getMessageTypes().get(96);
     internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsProcedureDoneRequest_descriptor,
         new java.lang.String[] { "Procedure", });
     internal_static_hbase_pb_IsProcedureDoneResponse_descriptor =
-      getDescriptor().getMessageTypes().get(99);
+      getDescriptor().getMessageTypes().get(97);
     internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_IsProcedureDoneResponse_descriptor,
         new java.lang.String[] { "Done", "Snapshot", });
     internal_static_hbase_pb_GetProcedureResultRequest_descriptor =
-      getDescriptor().getMessageTypes().get(100);
+      getDescriptor().getMessageTypes().get(98);
     internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetProcedureResultRequest_descriptor,
         new java.lang.String[] { "ProcId", });
     internal_static_hbase_pb_GetProcedureResultResponse_descriptor =
-      getDescriptor().getMessageTypes().get(101);
+      getDescriptor().getMessageTypes().get(99);
     internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetProcedureResultResponse_descriptor,
         new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", });
     internal_static_hbase_pb_AbortProcedureRequest_descriptor =
-      getDescriptor().getMessageTypes().get(102);
+      getDescriptor().getMessageTypes().get(100);
     internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_AbortProcedureRequest_descriptor,
         new java.lang.String[] { "ProcId", "MayInterruptIfRunning", });
     internal_static_hbase_pb_AbortProcedureResponse_descriptor =
-      getDescriptor().getMessageTypes().get(103);
+      getDescriptor().getMessageTypes().get(101);
     internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_AbortProcedureResponse_descriptor,
         new java.lang.String[] { "IsProcedureAborted", });
     internal_static_hbase_pb_ListProceduresRequest_descriptor =
-      getDescriptor().getMessageTypes().get(104);
+      getDescriptor().getMessageTypes().get(102);
     internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ListProceduresRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_ListProceduresResponse_descriptor =
-      getDescriptor().getMessageTypes().get(105);
+      getDescriptor().getMessageTypes().get(103);
     internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ListProceduresResponse_descriptor,
         new java.lang.String[] { "Procedure", });
     internal_static_hbase_pb_SetQuotaRequest_descriptor =
-      getDescriptor().getMessageTypes().get(106);
+      getDescriptor().getMessageTypes().get(104);
     internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SetQuotaRequest_descriptor,
         new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
     internal_static_hbase_pb_SetQuotaResponse_descriptor =
-      getDescriptor().getMessageTypes().get(107);
+      getDescriptor().getMessageTypes().get(105);
     internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SetQuotaResponse_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor =
-      getDescriptor().getMessageTypes().get(108);
+      getDescriptor().getMessageTypes().get(106);
     internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor,
         new java.lang.String[] { "TableName", });
     internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor =
-      getDescriptor().getMessageTypes().get(109);
+      getDescriptor().getMessageTypes().get(107);
     internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor,
         new java.lang.String[] { "Region", });
     internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor =
-      getDescriptor().getMessageTypes().get(110);
+      getDescriptor().getMessageTypes().get(108);
     internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor,
         new java.lang.String[] { "CompactionTimestamp", });
     internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor =
-      getDescriptor().getMessageTypes().get(111);
+      getDescriptor().getMessageTypes().get(109);
     internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor =
-      getDescriptor().getMessageTypes().get(112);
+      getDescriptor().getMessageTypes().get(110);
     internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor,
         new java.lang.String[] { "Capabilities", });
     internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor =
-      getDescriptor().getMessageTypes().get(113);
+      getDescriptor().getMessageTypes().get(111);
     internal_static_hbase_pb_ListDrainingRegionServersRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor =
-      getDescriptor().getMessageTypes().get(114);
+      getDescriptor().getMessageTypes().get(112);
     internal_static_hbase_pb_ListDrainingRegionServersResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor,
         new java.lang.String[] { "ServerName", });
     internal_static_hbase_pb_DrainRegionServersRequest_descriptor =
-      getDescriptor().getMessageTypes().get(115);
+      getDescriptor().getMessageTypes().get(113);
     internal_static_hbase_pb_DrainRegionServersRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_DrainRegionServersRequest_descriptor,
         new java.lang.String[] { "ServerName", });
     internal_static_hbase_pb_DrainRegionServersResponse_descriptor =
-      getDescriptor().getMessageTypes().get(116);
+      getDescriptor().getMessageTypes().get(114);
     internal_static_hbase_pb_DrainRegionServersResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_DrainRegionServersResponse_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_descriptor =
-      getDescriptor().getMessageTypes().get(117);
+      getDescriptor().getMessageTypes().get(115);
     internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_descriptor,
         new java.lang.String[] { "ServerName", });
     internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_descriptor =
-      getDescriptor().getMessageTypes().get(118);
+      getDescriptor().getMessageTypes().get(116);
     internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_descriptor,
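The index shifts above are mechanical: protoc numbers message descriptors in declaration order, so deleting the two DispatchMergingRegions messages from Master.proto moves every later message down by two. A minimal sketch of the name-based lookup that is immune to such reorderings (illustrative only, not part of this patch; it assumes the generated MasterProtos.getDescriptor() and the shaded protobuf Descriptors API):

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

    public class DescriptorLookupSketch {
      // Resolving by name stays stable when messages are added or removed,
      // unlike the positional getMessageTypes().get(64) in the generated code.
      static Descriptor runCatalogScanRequest() {
        return MasterProtos.getDescriptor().findMessageTypeByName("RunCatalogScanRequest");
      }
    }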

diff --git hbase-protocol-shaded/src/main/protobuf/Admin.proto hbase-protocol-shaded/src/main/protobuf/Admin.proto
index e8cf10c..338c80b 100644
--- hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -189,22 +189,6 @@ message UpdateFavoredNodesResponse {
   optional uint32 response = 1;
 }
 
-/**
- * Merges the specified regions.
- *
- * This method currently closes the regions and then merges them
- */
-message MergeRegionsRequest {
-  required RegionSpecifier region_a = 1;
-  required RegionSpecifier region_b = 2;
-  optional bool forcible = 3 [default = false];
-  // wall clock time from master
-  optional uint64 master_system_time = 4;
-}
-
-message MergeRegionsResponse {
-}
-
 // Protocol buffer version of WAL for replication
 message WALEntry {
   required WALKey key = 1;
@@ -307,9 +291,6 @@ service AdminService {
   rpc CompactRegion(CompactRegionRequest)
     returns(CompactRegionResponse);
 
-  rpc MergeRegions(MergeRegionsRequest)
-    returns(MergeRegionsResponse);
-
   rpc ReplicateWALEntry(ReplicateWALEntryRequest)
     returns(ReplicateWALEntryResponse);
 
diff --git hbase-protocol-shaded/src/main/protobuf/Master.proto hbase-protocol-shaded/src/main/protobuf/Master.proto
index 7cd9921..e62f52c 100644
--- hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -80,21 +80,6 @@ message MoveRegionResponse {
 }
 
 /**
- * Dispatch merging the specified regions.
- */
-message DispatchMergingRegionsRequest {
-  required RegionSpecifier region_a = 1;
-  required RegionSpecifier region_b = 2;
-  optional bool forcible = 3 [default = false];
-  optional uint64 nonce_group = 4 [default = 0];
-  optional uint64 nonce = 5 [default = 0];
-}
-
-message DispatchMergingRegionsResponse {
-  optional uint64 proc_id = 1;
-}
-
-/**
  * Merging the specified regions in a table.
  */
 message MergeTableRegionsRequest {
@@ -625,10 +610,6 @@ service MasterService {
   rpc MoveRegion(MoveRegionRequest)
     returns(MoveRegionResponse);
 
-  /** Master dispatch merging the regions */
-  rpc DispatchMergingRegions(DispatchMergingRegionsRequest)
-    returns(DispatchMergingRegionsResponse);
-
   /** Master merge the regions */
   rpc MergeTableRegions(MergeTableRegionsRequest)
     returns(MergeTableRegionsResponse);
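With both the RegionServer-side MergeRegions RPC and the master's DispatchMergingRegions RPC removed above, merges reach the cluster only through MasterService.MergeTableRegions. A hedged client-side sketch (it assumes the HBase 2.0 Admin#mergeRegionsAsync API, which fronts that RPC; the region names are placeholders):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MergeClientSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Encoded names of two adjacent regions (placeholder values).
          byte[] regionA = Bytes.toBytes("encodedNameOfRegionA");
          byte[] regionB = Bytes.toBytes("encodedNameOfRegionB");
          // forcible=false: only adjacent regions may be merged.
          admin.mergeRegionsAsync(regionA, regionB, false).get();
        }
      }
    }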
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index 00376cb..a0a1d49 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -59,11 +59,23 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
       HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             (HBASE-).
+   *             Use {@link #preMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             (HBASE-).
+   *             Use {@link #postMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void postDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 461148b..98f21b2 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -111,11 +111,23 @@ public class BaseMasterObserver implements MasterObserver {
       final HRegionInfo[] regions) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             (HBASE-).
+   *             Use {@link #preMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             (HBASE-).
+   *             Use {@link #postMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void postDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 82b3cfa..bb0e732 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1699,7 +1699,11 @@ public interface MasterObserver extends Coprocessor {
    * @param regionA first region to be merged
    * @param regionB second region to be merged
    * @throws IOException if an error occurred on the coprocessor
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             (HBASE-).
+   *             Use {@link #preMergeRegions(ObserverContext, HRegionInfo[])}
    */
+  @Deprecated
   void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException;
 
@@ -1709,7 +1713,11 @@ public interface MasterObserver extends Coprocessor {
    * @param regionA first region to be merged
    * @param regionB second region to be merged
    * @throws IOException if an error occurred on the coprocessor
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             (HBASE-).
+   *             Use {@link #postMergeRegions(ObserverContext, HRegionInfo[])}
    */
+  @Deprecated
   void postDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> c,
       final HRegionInfo regionA, final HRegionInfo regionB) throws IOException;
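For coprocessor authors, the deprecations above mean moving from the two-argument hooks to the HRegionInfo[]-based replacements they point at. A minimal migration sketch (class name and the veto condition are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class MergeVetoObserver extends BaseMasterObserver {
      @Override
      public void preMergeRegions(ObserverContext<MasterCoprocessorEnvironment> ctx,
          HRegionInfo[] regionsToMerge) throws IOException {
        // Same veto point preDispatchMerge offered, but all regions arrive at once.
        for (HRegionInfo hri : regionsToMerge) {
          if (hri.getTable().isSystemTable()) {
            throw new IOException("Refusing to merge system table region " + hri);
          }
        }
      }
    }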
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a41960b..ab7a25e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -109,7 +109,6 @@ import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-import org.apache.hadoop.hbase.master.procedure.DispatchMergingRegionsProcedure;
 import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -1419,55 +1418,6 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public long dispatchMergingRegions(
-      final HRegionInfo regionInfoA,
-      final HRegionInfo regionInfoB,
-      final boolean forcible,
-      final long nonceGroup,
-      final long nonce) throws IOException {
-    checkInitialized();
-
-    TableName tableName = regionInfoA.getTable();
-    if (tableName == null || regionInfoB.getTable() == null) {
-      throw new UnknownRegionException ("Can't merge regions without table associated");
-    }
-
-    if (!tableName.equals(regionInfoB.getTable())) {
-      throw new IOException ("Cannot merge regions from two different tables");
-    }
-
-    if (regionInfoA.compareTo(regionInfoB) == 0) {
-      throw new MergeRegionException(
-        "Cannot merge a region to itself " + regionInfoA + ", " + regionInfoB);
-    }
-
-    HRegionInfo [] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge [0] = regionInfoA;
-    regionsToMerge [1] = regionInfoB;
-
-    return MasterProcedureUtil.submitProcedure(
-      new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-      @Override
-      protected void run() throws IOException {
-        getMaster().getMasterCoprocessorHost().preDispatchMerge(regionInfoA, regionInfoB);
-
-        LOG.info(getClientIdAuditPrefix() + " Merge regions "
-          + regionInfoA.getEncodedName() + " and " + regionInfoB.getEncodedName());
-
-        submitProcedure(new DispatchMergingRegionsProcedure(procedureExecutor.getEnvironment(),
-          tableName, regionsToMerge, forcible));
-
-        getMaster().getMasterCoprocessorHost().postDispatchMerge(regionInfoA, regionInfoB);
-      }
-
-      @Override
-      protected String getDescription() {
-        return "DisableTableProcedure";
-      }
-    });
-  }
-
-  @Override
   public long mergeRegions(
       final HRegionInfo[] regionsToMerge,
       final boolean forcible,
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 2f17a5f..3dec2e8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -773,28 +773,6 @@ public class MasterCoprocessorHost
     });
   }
 
-  public void preDispatchMerge(final HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-      throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-      @Override
-      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preDispatchMerge(ctx, regionInfoA, regionInfoB);
-      }
-    });
-  }
-
-  public void postDispatchMerge(final HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-      throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-      @Override
-      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.postDispatchMerge(ctx, regionInfoA, regionInfoB);
-      }
-    });
-  }
-
   public void preMergeRegions(final HRegionInfo[] regionsToMerge)
       throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
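The coprocessor-host plumbing above follows one template: wrap the observer callback in a CoprocessorOperation and hand it to execOperation, which does nothing when no coprocessors are loaded. A freestanding sketch of that dispatch shape (types simplified and names illustrative, not HBase's actual classes):

    import java.io.IOException;
    import java.util.List;

    public class ObserverDispatchSketch {
      interface Observer {
        void onEvent(String payload) throws IOException;
      }

      private final List<Observer> observers;

      ObserverDispatchSketch(List<Observer> observers) {
        this.observers = observers;
      }

      // Mirrors execOperation(coprocessors.isEmpty() ? null : op): skip entirely
      // when nothing is registered, otherwise run every observer in order.
      void execOperation(String payload) throws IOException {
        if (observers.isEmpty()) {
          return;
        }
        for (Observer o : observers) {
          o.onEvent(payload);
        }
      }
    }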
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 5873986..1151c92 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -501,52 +501,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public DispatchMergingRegionsResponse dispatchMergingRegions(RpcController c,
-      DispatchMergingRegionsRequest request) throws ServiceException {
-    try {
-      master.checkInitialized();
-    } catch (IOException ioe) {
-      throw new ServiceException(ioe);
-    }
-
-    final byte[] encodedNameOfRegionA = request.getRegionA().getValue()
-      .toByteArray();
-    final byte[] encodedNameOfRegionB = request.getRegionB().getValue()
-      .toByteArray();
-    if (request.getRegionA().getType() != RegionSpecifierType.ENCODED_REGION_NAME
-        || request.getRegionB().getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
-      LOG.warn("mergeRegions specifier type: expected: "
-        + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region_a="
-        + request.getRegionA().getType() + ", region_b="
-        + request.getRegionB().getType());
-    }
-
-    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
-    RegionState regionStateA = regionStates.getRegionState(Bytes.toString(encodedNameOfRegionA));
-    RegionState regionStateB = regionStates.getRegionState(Bytes.toString(encodedNameOfRegionB));
-    if (regionStateA == null || regionStateB == null) {
-      throw new ServiceException(new UnknownRegionException(
-        Bytes.toStringBinary(regionStateA == null ? encodedNameOfRegionA
-          : encodedNameOfRegionB)));
-    }
-
-    final HRegionInfo regionInfoA = regionStateA.getRegion();
-    final HRegionInfo regionInfoB = regionStateB.getRegion();
-
-    try {
-      long procId = master.dispatchMergingRegions(
-        regionInfoA,
-        regionInfoB,
-        request.getForcible(),
-        request.getNonceGroup(),
-        request.getNonce());
-      return DispatchMergingRegionsResponse.newBuilder().setProcId(procId).build();
-    } catch (IOException ioe) {
-      throw new ServiceException(ioe);
-    }
-  }
-
-  @Override
   public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
       EnableCatalogJanitorRequest req) throws ServiceException {
     try {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 5019eda..79ebca5 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -326,23 +326,6 @@ public interface MasterServices extends Server {
   boolean registerService(Service instance);
 
   /**
-   * Merge two regions. The real implementation is on the regionserver, master
-   * just move the regions together and send MERGE RPC to regionserver
-   * @param region_a region to merge
-   * @param region_b region to merge
-   * @param forcible true if do a compulsory merge, otherwise we will only merge
-   *          two adjacent regions
-   * @return procedure Id
-   * @throws IOException
-   */
-  long dispatchMergingRegions(
-    final HRegionInfo region_a,
-    final HRegionInfo region_b,
-    final boolean forcible,
-    final long nonceGroup,
-    final long nonce) throws IOException;
-
-  /**
    * @return true if master is the active one
    */
   boolean isActiveMaster();

diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index f3b21ac..38493cd 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -896,37 +896,6 @@ public class ServerManager {
   }
 
   /**
-   * Sends an MERGE REGIONS RPC to the specified server to merge the specified
-   * regions.
-   *
-   * A region server could reject the close request because it either does not
-   * have the specified region.
-   * @param server server to merge regions
-   * @param region_a region to merge
-   * @param region_b region to merge
-   * @param forcible true if do a compulsory merge, otherwise we will only merge
-   *          two adjacent regions
-   * @throws IOException
-   */
-  public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
-      HRegionInfo region_b, boolean forcible, final User user) throws IOException {
-    if (server == null)
-      throw new NullPointerException("Passed server is null");
-    if (region_a == null || region_b == null)
-      throw new NullPointerException("Passed region is null");
-    AdminService.BlockingInterface admin = getRsAdmin(server);
-    if (admin == null) {
-      throw new IOException("Attempting to send MERGE REGIONS RPC to server "
-        + server.toString() + " for region "
-        + region_a.getRegionNameAsString() + ","
-        + region_b.getRegionNameAsString()
-        + " failed because no RPC connection found to this server");
-    }
-    HBaseRpcController controller = newRpcController();
-    ProtobufUtil.mergeRegions(controller, admin, region_a, region_b, forcible, user);
-  }
-
-  /**
    * Check if a region server is reachable and has the expected start code
    */
   public boolean isServerReachable(ServerName server) {
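ServerManager loses its fire-and-forget merge RPC above; the surviving entry point is the master's own mergeRegions, which runs the merge as a master-side procedure. A sketch of how a caller inside the master might submit one (the parameter list is inferred from the retained HMaster#mergeRegions in this patch, and the nonce values are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.master.MasterServices;

    public class SubmitMergeSketch {
      // Returns the procedure id; progress is tracked by the procedure
      // framework rather than by an RPC round-trip to the regionserver.
      static long submitMerge(MasterServices master, HRegionInfo a, HRegionInfo b)
          throws IOException {
        return master.mergeRegions(new HRegionInfo[] { a, b }, false /* forcible */, 0L, 0L);
      }
    }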
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
deleted file mode 100644
index ee92932..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
+++ /dev/null
@@ -1,579 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.CatalogJanitor;
-import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsState;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-/**
- * The procedure to Merge a region in a table.
- */
-@InterfaceAudience.Private
-public class DispatchMergingRegionsProcedure
-    extends AbstractStateMachineTableProcedure<DispatchMergingRegionsState> {
-  private static final Log LOG = LogFactory.getLog(DispatchMergingRegionsProcedure.class);
-
-  private final AtomicBoolean aborted = new AtomicBoolean(false);
-  private Boolean traceEnabled;
-  private AssignmentManager assignmentManager;
-  private int timeout;
-  private ServerName regionLocation;
-  private String regionsToMergeListFullName;
-  private String regionsToMergeListEncodedName;
-
-  private TableName tableName;
-  private HRegionInfo [] regionsToMerge;
-  private boolean forcible;
-
-  public DispatchMergingRegionsProcedure() {
-    this.traceEnabled = isTraceEnabled();
-    this.assignmentManager = null;
-    this.timeout = -1;
-    this.regionLocation = null;
-    this.regionsToMergeListFullName = null;
-    this.regionsToMergeListEncodedName = null;
-  }
-
-  public DispatchMergingRegionsProcedure(
-      final MasterProcedureEnv env,
-      final TableName tableName,
-      final HRegionInfo [] regionsToMerge,
-      final boolean forcible) {
-    super(env);
-    this.traceEnabled = isTraceEnabled();
-    this.assignmentManager = getAssignmentManager(env);
-    this.tableName = tableName;
-    // For now, we only merge 2 regions.  It could be extended to more than 2 regions in
-    // the future.
-    assert(regionsToMerge.length == 2);
-    this.regionsToMerge = regionsToMerge;
-    this.forcible = forcible;
-
-    this.timeout = -1;
-    this.regionsToMergeListFullName = getRegionsToMergeListFullNameString();
-    this.regionsToMergeListEncodedName = getRegionsToMergeListEncodedNameString();
-  }
-
-  @Override
-  protected Flow executeFromState(
-      final MasterProcedureEnv env,
-      final DispatchMergingRegionsState state) throws InterruptedException {
-    if (isTraceEnabled()) {
-      LOG.trace(this + " execute state=" + state);
-    }
-
-    try {
-      switch (state) {
-      case DISPATCH_MERGING_REGIONS_PREPARE:
-        prepareMergeRegion(env);
-        setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_PRE_OPERATION);
-        break;
-      case DISPATCH_MERGING_REGIONS_PRE_OPERATION:
-        //Unused for now - reserve to add preMerge coprocessor in the future
-        setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS);
-        break;
-      case DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS:
-        if (MoveRegionsToSameRS(env)) {
-          setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS);
-        } else {
-          LOG.info("Cancel merging regions " + getRegionsToMergeListFullNameString()
-            + ", because can't move them to the same RS");
-          setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_POST_OPERATION);
-        }
-        break;
-      case DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS:
-        doMergeInRS(env);
-        setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_POST_OPERATION);
-        break;
-      case DISPATCH_MERGING_REGIONS_POST_OPERATION:
-        //Unused for now - reserve to add postCompletedMerge coprocessor in the future
-        return Flow.NO_MORE_STATE;
-      default:
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-      }
-    } catch (IOException e) {
-      LOG.warn("Error trying to merge regions " + getRegionsToMergeListFullNameString() +
-        " in the table " + tableName + " (in state=" + state + ")", e);
-
-      setFailure("master-merge-regions", e);
-    }
-    return Flow.HAS_MORE_STATE;
-  }
-
-  @Override
-  protected void rollbackState(
-      final MasterProcedureEnv env,
-      final DispatchMergingRegionsState state) throws IOException, InterruptedException {
-    if (isTraceEnabled()) {
-      LOG.trace(this + " rollback state=" + state);
-    }
-
-    try {
-      switch (state) {
-      case DISPATCH_MERGING_REGIONS_POST_OPERATION:
-      case DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS:
-        String msg = this + " We are in the " + state + " state."
-          + " It is complicated to rollback the merge operation that region server is working on."
-          + " Rollback is not supported and we should let the merge operation to complete";
-        LOG.warn(msg);
-        // PONR
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-      case DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS:
-        break; // nothing to rollback
-      case DISPATCH_MERGING_REGIONS_PRE_OPERATION:
-        break; // nothing to rollback
-      case DISPATCH_MERGING_REGIONS_PREPARE:
-        break; // nothing to rollback
-      default:
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-      }
-    } catch (Exception e) {
-      // This will be retried. Unless there is a bug in the code,
-      // this should be just a "temporary error" (e.g. network down)
-      LOG.warn("Failed rollback attempt step " + state + " for merging the regions "
-        + getRegionsToMergeListFullNameString() + " in table " + tableName, e);
-      throw e;
-    }
-  }
-
-  @Override
-  protected DispatchMergingRegionsState getState(final int stateId) {
-    return DispatchMergingRegionsState.valueOf(stateId);
-  }
-
-  @Override
-  protected int getStateId(final DispatchMergingRegionsState state) {
-    return state.getNumber();
-  }
-
-  @Override
-  protected DispatchMergingRegionsState getInitialState() {
-    return DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_PREPARE;
-  }
-
-  /*
-   * Check whether we are in the state that can be rollback
-   */
-  @Override
-  protected boolean isRollbackSupported(final DispatchMergingRegionsState state) {
-    switch (state) {
-    case DISPATCH_MERGING_REGIONS_POST_OPERATION:
-    case DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS:
-      // It is not safe to rollback if we reach to these states.
-      return false;
-    default:
-      break;
-    }
-    return true;
-  }
-
-  @Override
-  public void serializeStateData(final OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
-
-    MasterProcedureProtos.DispatchMergingRegionsStateData.Builder dispatchMergingRegionsMsg =
-      MasterProcedureProtos.DispatchMergingRegionsStateData.newBuilder()
-        .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
-        .setTableName(ProtobufUtil.toProtoTableName(tableName))
-        .setForcible(forcible);
-    for (HRegionInfo hri: regionsToMerge) {
-      dispatchMergingRegionsMsg.addRegionInfo(HRegionInfo.convert(hri));
-    }
-    dispatchMergingRegionsMsg.build().writeDelimitedTo(stream);
-  }
-
-  @Override
-  public void deserializeStateData(final InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
-
-    MasterProcedureProtos.DispatchMergingRegionsStateData dispatchMergingRegionsMsg =
-      MasterProcedureProtos.DispatchMergingRegionsStateData.parseDelimitedFrom(stream);
-    setUser(MasterProcedureUtil.toUserInfo(dispatchMergingRegionsMsg.getUserInfo()));
-    tableName = ProtobufUtil.toTableName(dispatchMergingRegionsMsg.getTableName());
-
-    assert(dispatchMergingRegionsMsg.getRegionInfoCount() == 2);
-    regionsToMerge = new HRegionInfo[dispatchMergingRegionsMsg.getRegionInfoCount()];
-    for (int i = 0; i < regionsToMerge.length; i++) {
-      regionsToMerge[i] = HRegionInfo.convert(dispatchMergingRegionsMsg.getRegionInfo(i));
-    }
-  }
-
-  @Override
-  public void toStringClassDetails(StringBuilder sb) {
-    sb.append(getClass().getSimpleName());
-    sb.append(" (table=");
-    sb.append(tableName);
-    sb.append(" regions=");
-    sb.append(getRegionsToMergeListFullNameString());
-    sb.append(" forcible=");
-    sb.append(forcible);
-    sb.append(")");
-  }
-
-  @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
-    return !env.getProcedureQueue().waitRegions(
-      this, getTableName(), regionsToMerge[0], regionsToMerge[1]);
-  }
-
-  @Override
-  protected void releaseLock(final MasterProcedureEnv env) {
-    env.getProcedureQueue().wakeRegions(this, getTableName(), regionsToMerge[0], regionsToMerge[1]);
-  }
-
-  @Override
-  public TableName getTableName() {
-    return tableName;
-  }
-
-  @Override
-  public TableOperationType getTableOperationType() {
-    return TableOperationType.MERGE;
-  }
-
-  /**
-   * Prepare merge and do some check
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void prepareMergeRegion(final MasterProcedureEnv env) throws IOException {
-    // Note: the following logic assumes that we only have 2 regions to merge.  In the future,
-    // if we want to extend to more than 2 regions, the code needs to modify a little bit.
-    //
-    CatalogJanitor catalogJanitor = env.getMasterServices().getCatalogJanitor();
-    boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(regionsToMerge[0]);
-    if (regionAHasMergeQualifier
-        || !catalogJanitor.cleanMergeQualifier(regionsToMerge[1])) {
-      String msg = "Skip merging regions " + regionsToMerge[0].getRegionNameAsString()
-        + ", " + regionsToMerge[1].getRegionNameAsString() + ", because region "
-        + (regionAHasMergeQualifier ? regionsToMerge[0].getEncodedName() : regionsToMerge[1]
-          .getEncodedName()) + " has merge qualifier";
-      LOG.info(msg);
-      throw new MergeRegionException(msg);
-    }
-
-    RegionStates regionStates = getAssignmentManager(env).getRegionStates();
-    RegionState regionStateA = regionStates.getRegionState(regionsToMerge[0].getEncodedName());
-    RegionState regionStateB = regionStates.getRegionState(regionsToMerge[1].getEncodedName());
-    if (regionStateA == null || regionStateB == null) {
-      throw new UnknownRegionException(
-        regionStateA == null ?
-          regionsToMerge[0].getEncodedName() : regionsToMerge[1].getEncodedName());
-    }
-
-    if (!regionStateA.isOpened() || !regionStateB.isOpened()) {
-      throw new MergeRegionException(
-        "Unable to merge regions not online " + regionStateA + ", " + regionStateB);
-    }
-
-    if (regionsToMerge[0].getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
-        regionsToMerge[1].getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-      throw new MergeRegionException("Can't merge non-default replicas");
-    }
-
-    if (!forcible && !HRegionInfo.areAdjacent(regionsToMerge[0], regionsToMerge[1])) {
-      throw new MergeRegionException(
-        "Unable to merge not adjacent regions "
-          + regionsToMerge[0].getRegionNameAsString() + ", "
-          + regionsToMerge[1].getRegionNameAsString()
-          + " where forcible = " + forcible);
-    }
-  }
-
-  /**
-   * Move all regions to the same region server
-   * @param env MasterProcedureEnv
-   * @return whether target regions hosted by the same RS
-   * @throws IOException
-   */
-  private boolean MoveRegionsToSameRS(final MasterProcedureEnv env) throws IOException {
-    // Make sure regions are on the same regionserver before send merge
-    // regions request to region server.
-    //
-    boolean onSameRS = isRegionsOnTheSameServer(env);
-    if (!onSameRS) {
-      // Note: the following logic assumes that we only have 2 regions to merge.  In the future,
-      // if we want to extend to more than 2 regions, the code needs to modify a little bit.
-      //
-      RegionStates regionStates = getAssignmentManager(env).getRegionStates();
-      ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
-
-      RegionLoad loadOfRegionA = getRegionLoad(env, regionLocation, regionsToMerge[0]);
-      RegionLoad loadOfRegionB = getRegionLoad(env, regionLocation2, regionsToMerge[1]);
-      if (loadOfRegionA != null && loadOfRegionB != null
-          && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) {
-        // switch regionsToMerge[0] and regionsToMerge[1]
-        HRegionInfo tmpRegion = this.regionsToMerge[0];
-        this.regionsToMerge[0] = this.regionsToMerge[1];
-        this.regionsToMerge[1] = tmpRegion;
-        ServerName tmpLocation = regionLocation;
-        regionLocation = regionLocation2;
-        regionLocation2 = tmpLocation;
-      }
-
-      long startTime = EnvironmentEdgeManager.currentTime();
-
-      RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation);
-      LOG.info("Moving regions to same server for merge: " + regionPlan.toString());
-      getAssignmentManager(env).balance(regionPlan);
-      do {
-        try {
-          Thread.sleep(20);
-          // Make sure check RIT first, then get region location, otherwise
-          // we would make a wrong result if region is online between getting
-          // region location and checking RIT
-          boolean isRIT = regionStates.isRegionInTransition(regionsToMerge[1]);
-          regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
-          onSameRS = regionLocation.equals(regionLocation2);
-          if (onSameRS || !isRIT) {
-            // Regions are on the same RS, or regionsToMerge[1] is not in
-            // RegionInTransition any more
-            break;
-          }
-        } catch (InterruptedException e) {
-          InterruptedIOException iioe = new InterruptedIOException();
-          iioe.initCause(e);
-          throw iioe;
-        }
-      } while ((EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
-    }
-    return onSameRS;
-  }
-
-  /**
-   * Do the real merge operation in the region server that hosts regions
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void doMergeInRS(final MasterProcedureEnv env) throws IOException {
-    long duration = 0;
-    long startTime = EnvironmentEdgeManager.currentTime();
-    do {
-      try {
-        if (getServerName(env) == null) {
-          // The merge probably already happen. Check
-          RegionState regionState = getAssignmentManager(env).getRegionStates().getRegionState(
-            regionsToMerge[0].getEncodedName());
-          if (regionState.isMerging() || regionState.isMerged()) {
-            LOG.info("Merge regions " + getRegionsToMergeListEncodedNameString() +
-              " is in progress or completed.  No need to send a new request.");
-          } else {
-            LOG.warn("Cannot sending merge to hosting server of the regions " +
-              getRegionsToMergeListEncodedNameString() + " as the server is unknown");
-          }
-          return;
-        }
-        // TODO: the following RPC call is not idempotent.  Multiple calls (eg. after master
-        // failover, re-execute this step) could result in some exception thrown that does not
-        // paint the correct picture.  This behavior is on-par with old releases.  Improvement
-        // could happen in the future.
-        env.getMasterServices().getServerManager().sendRegionsMerge(
-          getServerName(env),
-          regionsToMerge[0],
-          regionsToMerge[1],
-          forcible,
-          getUser());
-        LOG.info("Sent merge to server " + getServerName(env) + " for region " +
-          getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible);
-        return;
-      } catch (RegionOpeningException roe) {
-        // Do a retry since region should be online on RS immediately
-        LOG.warn("Failed mergering regions in " + getServerName(env) + ", retrying...", roe);
-      } catch (Exception ie) {
-        LOG.warn("Failed sending merge to " + getServerName(env) + " for regions " +
-          getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible, ie);
-        return;
-      }
-    } while ((duration = EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
-
-    // If we reaches here, it means that we get timed out.
-    String msg = "Failed sending merge to " + getServerName(env) + " after " + duration + "ms";
-    LOG.warn(msg);
-    throw new IOException(msg);
-  }
-
-  private RegionLoad getRegionLoad(
-      final MasterProcedureEnv env,
-      final ServerName sn,
-      final HRegionInfo hri) {
-    ServerManager serverManager = env.getMasterServices().getServerManager();
-    ServerLoad load = serverManager.getLoad(sn);
-    if (load != null) {
-      Map<byte[], RegionLoad> regionsLoad = load.getRegionsLoad();
-      if (regionsLoad != null) {
-        return regionsLoad.get(hri.getRegionName());
-      }
-    }
-    return null;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return whether target regions hosted by the same RS
-   */
-  private boolean isRegionsOnTheSameServer(final MasterProcedureEnv env) throws IOException{
-    Boolean onSameRS = true;
-    int i = 0;
-    RegionStates regionStates = getAssignmentManager(env).getRegionStates();
-    regionLocation = regionStates.getRegionServerOfRegion(regionsToMerge[i]);
-    if (regionLocation != null) {
-      for(i = 1; i < regionsToMerge.length; i++) {
-        ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[i]);
-        if (regionLocation2 != null) {
-          if (onSameRS) {
-            onSameRS = regionLocation.equals(regionLocation2);
-          }
-        } else {
-          // At least one region is not online, merge will fail, no need to continue.
-          break;
-        }
-      }
-      if (i == regionsToMerge.length) {
-        // Finish checking all regions, return the result;
-        return onSameRS;
-      }
-    }
-
-    // If reaching here, at least one region is not online.
-    String msg = "Skip merging regions " + getRegionsToMergeListFullNameString() +
-      ", because region " + regionsToMerge[i].getEncodedName() + " is not online now.";
-    LOG.warn(msg);
-    throw new IOException(msg);
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return assignmentManager
-   */
-  private AssignmentManager getAssignmentManager(final MasterProcedureEnv env) {
-    if (assignmentManager == null) {
-      assignmentManager = env.getMasterServices().getAssignmentManager();
-    }
-    return assignmentManager;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return timeout value
-   */
-  private int getTimeout(final MasterProcedureEnv env) {
-    if (timeout == -1) {
-      timeout = env.getMasterConfiguration().getInt(
-        "hbase.master.regionmerge.timeout", regionsToMerge.length * 60 * 1000);
-    }
-    return timeout;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return serverName
-   */
-  private ServerName getServerName(final MasterProcedureEnv env) {
-    if (regionLocation == null) {
-      regionLocation =
-        getAssignmentManager(env).getRegionStates().getRegionServerOfRegion(regionsToMerge[0]);
-    }
-    return regionLocation;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param fullName whether return only encoded name
-   * @return region names in a list
-   */
-  private String getRegionsToMergeListFullNameString() {
-    if (regionsToMergeListFullName == null) {
-      StringBuilder sb = new StringBuilder("[");
-      int i = 0;
-      while(i < regionsToMerge.length - 1) {
-        sb.append(regionsToMerge[i].getRegionNameAsString() + ", ");
-        i++;
-      }
-      sb.append(regionsToMerge[i].getRegionNameAsString() + " ]");
-      regionsToMergeListFullName = sb.toString();
-    }
-    return regionsToMergeListFullName;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return encoded region names
-   */
-  private String getRegionsToMergeListEncodedNameString() {
-    if (regionsToMergeListEncodedName == null) {
-      StringBuilder sb = new StringBuilder("[");
-      int i = 0;
-      while(i < regionsToMerge.length - 1) {
-        sb.append(regionsToMerge[i].getEncodedName() + ", ");
-        i++;
-      }
-      sb.append(regionsToMerge[i].getEncodedName() + " ]");
-      regionsToMergeListEncodedName = sb.toString();
-    }
-    return regionsToMergeListEncodedName;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return traceEnabled
-   */
-  private Boolean isTraceEnabled() {
-    if (traceEnabled == null) {
-      traceEnabled = LOG.isTraceEnabled();
-    }
-    return traceEnabled;
-  }
-}
- * - * @param a region a - * @param b region b - * @return new merged region - * @throws IOException - */ - public static HRegion merge(final HRegion a, final HRegion b) throws IOException { - if (!a.getRegionInfo().getTable().equals(b.getRegionInfo().getTable())) { - throw new IOException("Regions do not belong to the same table"); - } - - FileSystem fs = a.getRegionFileSystem().getFileSystem(); - // Make sure each region's cache is empty - a.flush(true); - b.flush(true); - - // Compact each region so we only have one store file per family - a.compact(true); - if (LOG.isDebugEnabled()) { - LOG.debug("Files for region: " + a); - a.getRegionFileSystem().logFileSystemState(LOG); - } - b.compact(true); - if (LOG.isDebugEnabled()) { - LOG.debug("Files for region: " + b); - b.getRegionFileSystem().logFileSystemState(LOG); - } - - RegionMergeTransactionImpl rmt = new RegionMergeTransactionImpl(a, b, true); - if (!rmt.prepare(null)) { - throw new IOException("Unable to merge regions " + a + " and " + b); - } - HRegionInfo mergedRegionInfo = rmt.getMergedRegionInfo(); - LOG.info("starting merge of regions: " + a + " and " + b - + " into new region " + mergedRegionInfo.getRegionNameAsString() - + " with start key <" - + Bytes.toStringBinary(mergedRegionInfo.getStartKey()) - + "> and end key <" - + Bytes.toStringBinary(mergedRegionInfo.getEndKey()) + ">"); - HRegion dstRegion; - try { - dstRegion = (HRegion)rmt.execute(null, null); - } catch (IOException ioe) { - rmt.rollback(null, null); - throw new IOException("Failed merging region " + a + " and " + b - + ", and successfully rolled back"); - } - dstRegion.compact(true); - - if (LOG.isDebugEnabled()) { - LOG.debug("Files for new region"); - dstRegion.getRegionFileSystem().logFileSystemState(LOG); - } - - // clear the compacted files if any - for (Store s : dstRegion.getStores()) { - s.closeAndArchiveCompactedFiles(); - } - if (dstRegion.getRegionFileSystem().hasReferences(dstRegion.getTableDesc())) { - throw new IOException("Merged region " + dstRegion - + " still has references after the compaction, is compaction canceled?"); - } - - // Archiving the 'A' region - HFileArchiver.archiveRegion(a.getBaseConf(), fs, a.getRegionInfo()); - // Archiving the 'B' region - HFileArchiver.archiveRegion(b.getBaseConf(), fs, b.getRegionInfo()); - - LOG.info("merge completed. New region is " + dstRegion); - return dstRegion; - } - @Override public Result get(final Get get) throws IOException { prepareGet(get); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 7b5f799..1e9f16b 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -115,8 +115,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; @@ -1642,46 +1640,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } /** - * Merge regions on the region server. - * - * @param controller the RPC controller - * @param request the request - * @return merge regions response - * @throws ServiceException - */ - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public MergeRegionsResponse mergeRegions(final RpcController controller, - final MergeRegionsRequest request) throws ServiceException { - try { - checkOpen(); - requestCount.increment(); - Region regionA = getRegion(request.getRegionA()); - Region regionB = getRegion(request.getRegionB()); - boolean forcible = request.getForcible(); - long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1; - regionA.startRegionOperation(Operation.MERGE_REGION); - regionB.startRegionOperation(Operation.MERGE_REGION); - if (regionA.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID || - regionB.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - throw new ServiceException(new MergeRegionException("Can't merge non-default replicas")); - } - LOG.info("Receiving merging request for " + regionA + ", " + regionB - + ",forcible=" + forcible); - regionA.flush(true); - regionB.flush(true); - regionServer.compactSplitThread.requestRegionsMerge(regionA, regionB, forcible, - masterSystemTime, RpcServer.getRequestUser()); - return MergeRegionsResponse.newBuilder().build(); - } catch (DroppedSnapshotException ex) { - regionServer.abort("Replay of WAL required. Forcing server shutdown", ex); - throw new ServiceException(ex); - } catch (IOException ie) { - throw new ServiceException(ie); - } - } - -
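[Editor's note: with this region-server-side mergeRegions RPC removed, a merge is requested from the master instead. A minimal client-side sketch of the replacement path, assuming a ClusterConnection named conn and two HRegionInfo values infoA/infoB (those names are illustrative; the calls themselves appear in the updated TestAdmin1 hunk near the end of this patch):

    // Build the master-side request from the encoded region names.
    byte[][] regionsToMerge = new byte[][] {
        infoA.getEncodedNameAsBytes(), infoB.getEncodedNameAsBytes() };
    MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
        regionsToMerge, true /* forcible */, HConstants.NO_NONCE, HConstants.NO_NONCE);
    // Dispatch to the master, which coordinates the merge from there.
    conn.getMaster().mergeTableRegions(null, request);
]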
/** - * Open asynchronously a region or a set of regions on the region server. * * The opening is coordinated by ZooKeeper, and this method requires the znode to be created diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java deleted file mode 100644 index ce69ad3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java +++ /dev/null @@ -1,153 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.DroppedSnapshotException; -import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.util.StringUtils; - -import com.google.common.base.Preconditions; - -/** - * Handles processing region merges. Put in a queue, owned by HRegionServer.
- */ -@InterfaceAudience.Private -class RegionMergeRequest implements Runnable { - private static final Log LOG = LogFactory.getLog(RegionMergeRequest.class); - private final HRegion region_a; - private final HRegion region_b; - private final HRegionServer server; - private final boolean forcible; - private TableLock tableLock; - private final long masterSystemTime; - private final User user; - - RegionMergeRequest(Region a, Region b, HRegionServer hrs, boolean forcible, - long masterSystemTime, User user) { - Preconditions.checkNotNull(hrs); - this.region_a = (HRegion)a; - this.region_b = (HRegion)b; - this.server = hrs; - this.forcible = forcible; - this.masterSystemTime = masterSystemTime; - this.user = user; - } - - @Override - public String toString() { - return "MergeRequest,regions:" + region_a + ", " + region_b + ", forcible=" - + forcible; - } - - @Override - public void run() { - if (this.server.isStopping() || this.server.isStopped()) { - LOG.debug("Skipping merge because server is stopping=" - + this.server.isStopping() + " or stopped=" + this.server.isStopped()); - return; - } - try { - final long startTime = EnvironmentEdgeManager.currentTime(); - RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, - region_b, forcible, masterSystemTime); - - //acquire a shared read lock on the table, so that table schema modifications - //do not happen concurrently - tableLock = server.getTableLockManager().readLock(region_a.getTableDesc().getTableName() - , "MERGE_REGIONS:" + region_a.getRegionInfo().getRegionNameAsString() + ", " + - region_b.getRegionInfo().getRegionNameAsString()); - try { - tableLock.acquire(); - } catch (IOException ex) { - tableLock = null; - throw ex; - } - - // If prepare does not return true, for some reason -- logged inside in - // the prepare call -- we are not ready to merge just now. Just return. - if (!mt.prepare(this.server)) return; - try { - mt.execute(this.server, this.server, this.user); - } catch (Exception e) { - if (this.server.isStopping() || this.server.isStopped()) { - LOG.info( - "Skip rollback/cleanup of failed merge of " + region_a + " and " - + region_b + " because server is" - + (this.server.isStopping() ? " stopping" : " stopped"), e); - return; - } - if (e instanceof DroppedSnapshotException) { - server.abort("Replay of WAL required. Forcing server shutdown", e); - return; - } - try { - LOG.warn("Running rollback/cleanup of failed merge of " - + region_a +" and "+ region_b + "; " + e.getMessage(), e); - if (mt.rollback(this.server, this.server)) { - LOG.info("Successful rollback of failed merge of " - + region_a +" and "+ region_b); - } else { - this.server.abort("Abort; we got an error after point-of-no-return" - + "when merging " + region_a + " and " + region_b); - } - } catch (RuntimeException ee) { - String msg = "Failed rollback of failed merge of " - + region_a +" and "+ region_b + " -- aborting server"; - // If failed rollback, kill this server to avoid having a hole in - // table. - LOG.info(msg, ee); - this.server.abort(msg); - } - return; - } - LOG.info("Regions merged, hbase:meta updated, and report to master. region_a=" - + region_a + ", region_b=" + region_b + ",merged region=" - + mt.getMergedRegionInfo().getRegionNameAsString() - + ". Region merge took " - + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTime(), startTime)); - } catch (IOException ex) { - ex = ex instanceof RemoteException ? 
((RemoteException) ex).unwrapRemoteException() : ex; - LOG.error("Merge failed " + this, ex); - server.checkFileSystem(); - } finally { - releaseTableLock(); - } - } - - protected void releaseTableLock() { - if (this.tableLock != null) { - try { - this.tableLock.release(); - } catch (IOException ex) { - LOG.error("Could not release the table lock (something is really wrong). " - + "Aborting this server to avoid holding the lock forever."); - this.server.abort("Abort; we got an error when releasing the table lock " - + "on " + region_a.getRegionInfo().getRegionNameAsString()); - } - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java deleted file mode 100644 index c844d54..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.ReflectionUtils; - -/** - * A factory for creating RegionMergeTransactions, which execute region merges as a "transaction". - * See {@link RegionMergeTransactionImpl} - */ -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -@InterfaceStability.Evolving -public class RegionMergeTransactionFactory implements Configurable { - - public static final String MERGE_TRANSACTION_IMPL_KEY = - "hbase.regionserver.merge.transaction.impl"; - - private Configuration conf; - - public RegionMergeTransactionFactory(Configuration conf) { - this.conf = conf; - } - - @Override - public Configuration getConf() { - return conf; - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - } - - /** - * Create a merge transaction - * @param a region a to merge - * @param b region b to merge - * @param forcible if false, we will only merge adjacent regions - * @return transaction instance - */ - public RegionMergeTransactionImpl create(final Region a, final Region b, - final boolean forcible) { - // The implementation class must extend RegionMergeTransactionImpl, not only - // implement the RegionMergeTransaction interface like you might expect, - // because various places such as AssignmentManager use static methods - // from RegionMergeTransactionImpl. Whatever we use for implementation must - // be compatible, so it's safest to require ? extends RegionMergeTransactionImpl. - // If not compatible we will throw a runtime exception from here. - return ReflectionUtils.instantiateWithCustomCtor( - conf.getClass(MERGE_TRANSACTION_IMPL_KEY, RegionMergeTransactionImpl.class, - RegionMergeTransactionImpl.class).getName(), - new Class[] { Region.class, Region.class, boolean.class }, - new Object[] { a, b, forcible }); - } - -}
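[Editor's note: the factory above resolves its implementation class from configuration, so a deployment could in principle swap in a custom transaction. A hypothetical sketch only; the subclass name is invented, but the constraints come from the removed code itself: the class must extend RegionMergeTransactionImpl and keep a (Region, Region, boolean) constructor, since create() instantiates it reflectively with exactly those argument types:

    public class AuditingMergeTransaction extends RegionMergeTransactionImpl {
      public AuditingMergeTransaction(Region a, Region b, boolean forcible) {
        super(a, b, forcible); // must delegate; the factory supplies these three args
      }
    }
    // Wire it up through the factory's key, e.g.:
    // conf.set("hbase.regionserver.merge.transaction.impl",
    //     AuditingMergeTransaction.class.getName());
]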
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java deleted file mode 100644 index 5c86429..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java +++ /dev/null @@ -1,742 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.MetaMutationAnnotation; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.LoggingProgressable; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Pair; - -import com.google.common.annotations.VisibleForTesting; - -@InterfaceAudience.Private -public class RegionMergeTransactionImpl implements RegionMergeTransaction { - private static final Log LOG = LogFactory.getLog(RegionMergeTransactionImpl.class); - - // Merged region info - private HRegionInfo mergedRegionInfo; - // region_a sorts before region_b - private final HRegion region_a; - private final HRegion region_b; - // merges dir is under region_a - private final Path mergesdir; - // We only merge adjacent regions if forcible is false - private final boolean forcible; - private final long masterSystemTime; - - /* - * Transaction state for listener, only valid during execute and - * rollback - */ - private RegionMergeTransactionPhase currentPhase = RegionMergeTransactionPhase.STARTED; - private Server server; - private RegionServerServices rsServices; - - public static class JournalEntryImpl implements JournalEntry { - private RegionMergeTransactionPhase type; - private long timestamp; - - public JournalEntryImpl(RegionMergeTransactionPhase type) { - this(type, EnvironmentEdgeManager.currentTime()); - } - - public JournalEntryImpl(RegionMergeTransactionPhase type, long timestamp) { - this.type = type; - this.timestamp = timestamp; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(type); - sb.append(" at "); - sb.append(timestamp); - return sb.toString(); - } - - @Override - public RegionMergeTransactionPhase getPhase() { - return type; - } - - @Override - public long getTimeStamp() { - return timestamp; - } - } - - /* - * Journal of how far the merge transaction has progressed. - */ - private final List<JournalEntry> journal = new ArrayList<JournalEntry>(); - - /** - * Listeners - */ - private final ArrayList<TransactionListener> listeners = new ArrayList<TransactionListener>(); - - private static IOException closedByOtherException = new IOException( - "Failed to close region: already closed by another thread"); - - private RegionServerCoprocessorHost rsCoprocessorHost = null; - - /** - * Constructor - * @param a region a to merge - * @param b region b to merge - * @param forcible if false, we will only merge adjacent regions - */ - public RegionMergeTransactionImpl(final Region a, final Region b, - final boolean forcible) { - this(a, b, forcible, EnvironmentEdgeManager.currentTime()); - } - /** - * Constructor - * @param a region a to merge - * @param b region b to merge - * @param forcible if false, we will only merge adjacent regions - * @param masterSystemTime the time at the master side - */ - public RegionMergeTransactionImpl(final Region a, final Region b, - final boolean forcible, long masterSystemTime) { - if (a.getRegionInfo().compareTo(b.getRegionInfo()) <= 0) { - this.region_a = (HRegion)a; - this.region_b = (HRegion)b; - } else { - this.region_a = (HRegion)b; - this.region_b = (HRegion)a; - } - this.forcible = forcible; - this.masterSystemTime = masterSystemTime; - this.mergesdir = region_a.getRegionFileSystem().getMergesDir(); - } - - private void transition(RegionMergeTransactionPhase nextPhase) throws IOException { - transition(nextPhase, false); - } - - private void transition(RegionMergeTransactionPhase nextPhase, boolean isRollback) - throws IOException { - if (!isRollback) { - // Add to the journal first, because if the listener throws an exception - // we need to roll back starting at 'nextPhase' - this.journal.add(new JournalEntryImpl(nextPhase)); - } - for (int i = 0; i < listeners.size(); i++) { - TransactionListener listener = listeners.get(i); - if (!isRollback) { - listener.transition(this, currentPhase, nextPhase); - } else { - listener.rollback(this, currentPhase, nextPhase); - } - } - currentPhase = nextPhase; - } - - @Override - public boolean prepare(final RegionServerServices services) throws IOException { - if (!region_a.getTableDesc().getTableName() - .equals(region_b.getTableDesc().getTableName())) { - LOG.info("Can't merge regions " + region_a + "," + region_b - + " because they do not belong to the same table"); - return false; - } - if (region_a.getRegionInfo().equals(region_b.getRegionInfo())) { - LOG.info("Can't merge the same region " + region_a); - return false; - } - if
(!forcible && !HRegionInfo.areAdjacent(region_a.getRegionInfo(), - region_b.getRegionInfo())) { - String msg = "Skip merging " + region_a.getRegionInfo().getRegionNameAsString() - + " and " + region_b.getRegionInfo().getRegionNameAsString() - + ", because they are not adjacent."; - LOG.info(msg); - return false; - } - if (!this.region_a.isMergeable() || !this.region_b.isMergeable()) { - return false; - } - try { - boolean regionAHasMergeQualifier = hasMergeQualifierInMeta(services, - region_a.getRegionInfo().getRegionName()); - if (regionAHasMergeQualifier || - hasMergeQualifierInMeta(services, region_b.getRegionInfo().getRegionName())) { - LOG.debug("Region " + (regionAHasMergeQualifier ? - region_a.getRegionInfo().getRegionNameAsString() - : region_b.getRegionInfo().getRegionNameAsString()) - + " is not mergeable because it has merge qualifier in META"); - return false; - } - } catch (IOException e) { - LOG.warn("Failed judging whether merge transaction is available for " - + region_a.getRegionInfo().getRegionNameAsString() + " and " - + region_b.getRegionInfo().getRegionNameAsString(), e); - return false; - } - - // WARN: make sure there is no parent region of the two merging regions in - // hbase:meta If exists, fixing up daughters would cause daughter regions(we - // have merged one) online again when we restart master, so we should clear - // the parent region to prevent the above case - // Since HBASE-7721, we don't need fix up daughters any more. so here do - // nothing - - this.mergedRegionInfo = getMergedRegionInfo(region_a.getRegionInfo(), - region_b.getRegionInfo()); - - transition(RegionMergeTransactionPhase.PREPARED); - return true; - } - - @Override - public Region execute(final Server server, final RegionServerServices services) - throws IOException { - if (User.isHBaseSecurityEnabled(region_a.getBaseConf())) { - LOG.warn("Should use execute(Server, RegionServerServices, User)"); - } - return execute(server, services, null); - } - - @Override - public Region execute(final Server server, final RegionServerServices services, User user) - throws IOException { - this.server = server; - this.rsServices = services; - if (rsCoprocessorHost == null) { - rsCoprocessorHost = server != null ? - ((HRegionServer) server).getRegionServerCoprocessorHost() : null; - } - final HRegion mergedRegion = createMergedRegion(server, services, user); - if (rsCoprocessorHost != null) { - rsCoprocessorHost.postMergeCommit(this.region_a, this.region_b, mergedRegion, user); - } - stepsAfterPONR(server, services, mergedRegion, user); - - transition(RegionMergeTransactionPhase.COMPLETED); - - return mergedRegion; - } - - @VisibleForTesting - public void stepsAfterPONR(final Server server, final RegionServerServices services, - final HRegion mergedRegion, User user) throws IOException { - openMergedRegion(server, services, mergedRegion); - if (rsCoprocessorHost != null) { - rsCoprocessorHost.postMerge(region_a, region_b, mergedRegion, user); - } - } - - /** - * Prepare the merged region and region files. - * @param server Hosting server instance. Can be null when testing - * @param services Used to online/offline regions. - * @return merged region - * @throws IOException If thrown, transaction failed. 
Call - * {@link #rollback(Server, RegionServerServices)} - */ - private HRegion createMergedRegion(final Server server, final RegionServerServices services, - User user) throws IOException { - LOG.info("Starting merge of " + region_a + " and " - + region_b.getRegionInfo().getRegionNameAsString() + ", forcible=" + forcible); - if ((server != null && server.isStopped()) - || (services != null && services.isStopping())) { - throw new IOException("Server is stopped or stopping"); - } - - if (rsCoprocessorHost != null) { - boolean ret = rsCoprocessorHost.preMerge(region_a, region_b, user); - if (ret) { - throw new IOException("Coprocessor bypassing regions " + this.region_a + " " - + this.region_b + " merge."); - } - } - - // If true, no cluster to write meta edits to or to use coordination. - boolean testing = server == null ? true : server.getConfiguration() - .getBoolean("hbase.testing.nocluster", false); - - HRegion mergedRegion = stepsBeforePONR(server, services, testing); - - @MetaMutationAnnotation - final List<Mutation> metaEntries = new ArrayList<Mutation>(); - if (rsCoprocessorHost != null) { - boolean ret = rsCoprocessorHost.preMergeCommit(region_a, region_b, metaEntries, user); - - if (ret) { - throw new IOException("Coprocessor bypassing regions " + this.region_a + " " - + this.region_b + " merge."); - } - try { - for (Mutation p : metaEntries) { - HRegionInfo.parseRegionName(p.getRow()); - } - } catch (IOException e) { - LOG.error("Row key of mutation from coprocessor is not parsable as region name." - + "Mutations from coprocessor should only be for hbase:meta table.", e); - throw e; - } - } - - // This is the point of no return. Similar with SplitTransaction. - // IF we reach the PONR then subsequent failures need to crash out this - // regionserver - transition(RegionMergeTransactionPhase.PONR); - - // Add merged region and delete region_a and region_b - // as an atomic update. See HBASE-7721. This update to hbase:meta - // will determine whether the region is merged or not in case of failures. - // If it is successful, master will roll-forward, if not, master will - // rollback - if (services != null && !services.reportRegionStateTransition(TransitionCode.MERGE_PONR, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - // Passed PONR, let SSH clean it up - throw new IOException("Failed to notify master that merge passed PONR: " - + region_a.getRegionInfo().getRegionNameAsString() + " and " - + region_b.getRegionInfo().getRegionNameAsString()); - } - return mergedRegion; - } - - @VisibleForTesting - public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA, - HRegionInfo regionB, ServerName serverName, List<Mutation> mutations) throws IOException { - HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion); - - // use the maximum of what master passed us vs local time. - long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime); - - // Put for parent - Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged, time); - putOfMerged.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, - regionA.toByteArray()); - putOfMerged.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, - regionB.toByteArray()); - mutations.add(putOfMerged); - // Deletes for merging regions - Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA, time); - Delete deleteB = MetaTableAccessor.makeDeleteFromRegionInfo(regionB, time); - mutations.add(deleteA); - mutations.add(deleteB); - // The merged is a new region, openSeqNum = 1 is fine. - addLocation(putOfMerged, serverName, 1); - } - - @VisibleForTesting - Put addLocation(final Put p, final ServerName sn, long openSeqNum) { - p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes - .toBytes(sn.getHostAndPort())); - p.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn - .getStartcode())); - p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum)); - return p; - } - - @VisibleForTesting - public HRegion stepsBeforePONR(final Server server, final RegionServerServices services, - boolean testing) throws IOException { - if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_MERGE, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - throw new IOException("Failed to get ok from master to merge " - + region_a.getRegionInfo().getRegionNameAsString() + " and " - + region_b.getRegionInfo().getRegionNameAsString()); - } - - transition(RegionMergeTransactionPhase.SET_MERGING); - - this.region_a.getRegionFileSystem().createMergesDir(); - - transition(RegionMergeTransactionPhase.CREATED_MERGE_DIR); - - Map<byte[], List<StoreFile>> hstoreFilesOfRegionA = closeAndOfflineRegion( - services, this.region_a, true, testing); - Map<byte[], List<StoreFile>> hstoreFilesOfRegionB = closeAndOfflineRegion( - services, this.region_b, false, testing); - - assert hstoreFilesOfRegionA != null && hstoreFilesOfRegionB != null; - - // mergeStoreFiles creates merged region dirs under the region_a merges dir - // Nothing to unroll here if failure -- clean up of CREATE_MERGE_DIR will - // clean this up. - mergeStoreFiles(hstoreFilesOfRegionA, hstoreFilesOfRegionB); - - // Log to the journal that we are creating merged region. We could fail - // halfway through. If we do, we could have left - // stuff in fs that needs cleanup -- a storefile or two. That's why we - // add entry to journal BEFORE rather than AFTER the change. - - transition(RegionMergeTransactionPhase.STARTED_MERGED_REGION_CREATION); - - HRegion mergedRegion = createMergedRegionFromMerges(this.region_a, - this.region_b, this.mergedRegionInfo); - return mergedRegion; - } - - /** - * Create a merged region from the merges directory under region a. In order - * to mock it for tests, place it with a new method. - * @param a hri of region a - * @param b hri of region b - * @param mergedRegion hri of merged region - * @return merged HRegion.
- * @throws IOException - */ - @VisibleForTesting - HRegion createMergedRegionFromMerges(final HRegion a, final HRegion b, - final HRegionInfo mergedRegion) throws IOException { - return a.createMergedRegionFromMerges(mergedRegion, b); - } - - /** - * Close the merging region and offline it in regionserver - * @param services - * @param region - * @param isRegionA true if it is merging region a, false if it is region b - * @param testing true if it is testing - * @return a map of family name to list of store files - * @throws IOException - */ - private Map<byte[], List<StoreFile>> closeAndOfflineRegion( - final RegionServerServices services, final HRegion region, - final boolean isRegionA, final boolean testing) throws IOException { - Map<byte[], List<StoreFile>> hstoreFilesToMerge = null; - Exception exceptionToThrow = null; - try { - hstoreFilesToMerge = region.close(false); - } catch (Exception e) { - exceptionToThrow = e; - } - if (exceptionToThrow == null && hstoreFilesToMerge == null) { - // The region was closed by a concurrent thread. We can't continue - // with the merge, instead we must just abandon the merge. If we - // reopen or merge this could cause problems because the region has - // probably already been moved to a different server, or is in the - // process of moving to a different server. - exceptionToThrow = closedByOtherException; - } - if (exceptionToThrow != closedByOtherException) { - transition(isRegionA ? RegionMergeTransactionPhase.CLOSED_REGION_A - : RegionMergeTransactionPhase.CLOSED_REGION_B); - } - if (exceptionToThrow != null) { - if (exceptionToThrow instanceof IOException) - throw (IOException) exceptionToThrow; - throw new IOException(exceptionToThrow); - } - if (!testing) { - services.removeFromOnlineRegions(region, null); - } - - transition(isRegionA ? RegionMergeTransactionPhase.OFFLINED_REGION_A - : RegionMergeTransactionPhase.OFFLINED_REGION_B); - - return hstoreFilesToMerge; - } - - /** - * Get merged region info through the specified two regions - * @param a merging region A - * @param b merging region B - * @return the merged region info - */ - @VisibleForTesting - static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b) { - long rid = EnvironmentEdgeManager.currentTime(); - // Regionid is timestamp. Merged region's id can't be less than that of - // merging regions else will insert at wrong location in hbase:meta - if (rid < a.getRegionId() || rid < b.getRegionId()) { - LOG.warn("Clock skew; merging regions id are " + a.getRegionId() - + " and " + b.getRegionId() + ", but current time here is " + rid); - rid = Math.max(a.getRegionId(), b.getRegionId()) + 1; - } - - byte[] startKey = null; - byte[] endKey = null; - // Choose the smaller as start key - if (a.compareTo(b) <= 0) { - startKey = a.getStartKey(); - } else { - startKey = b.getStartKey(); - } - // Choose the bigger as end key - if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) - || (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) - && Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) { - endKey = a.getEndKey(); - } else { - endKey = b.getEndKey(); - } - - // Merged region is sorted between two merging regions in META - HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey, - endKey, false, rid); - return mergedRegionInfo; - } - -
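[Editor's note: a worked example of the key selection in getMergedRegionInfo above, with illustrative keys. Merging A = [b, c) and B = [c, e): A sorts first, so startKey is b; B's end key is the larger one, so endKey is e, and the merged range is [b, e). If instead B's end key were empty (end of table), the comparison falls through to the else branch and the merged region inherits the empty end key. For the region id: if the local clock reads 1000 but A and B carry ids 1200 and 1300, rid becomes max(1200, 1300) + 1 = 1301, so the merged region still sorts after both parents in hbase:meta.]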
/** - * Perform time consuming opening of the merged region. - * @param server Hosting server instance. Can be null when testing - * @param services Used to online/offline regions. - * @param merged the merged region - * @throws IOException If thrown, transaction failed. Call - * {@link #rollback(Server, RegionServerServices)} - */ - @VisibleForTesting - void openMergedRegion(final Server server, final RegionServerServices services, - HRegion merged) throws IOException { - boolean stopped = server != null && server.isStopped(); - boolean stopping = services != null && services.isStopping(); - if (stopped || stopping) { - LOG.info("Not opening merged region " + merged.getRegionInfo().getRegionNameAsString() - + " because stopping=" + stopping + ", stopped=" + stopped); - return; - } - HRegionInfo hri = merged.getRegionInfo(); - LoggingProgressable reporter = server == null ? null - : new LoggingProgressable(hri, server.getConfiguration().getLong( - "hbase.regionserver.regionmerge.open.log.interval", 10000)); - merged.openHRegion(reporter); - - if (services != null) { - if (!services.reportRegionStateTransition(TransitionCode.MERGED, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - throw new IOException("Failed to report merged region to master: " - + mergedRegionInfo.getShortNameToLog()); - } - services.addToOnlineRegions(merged); - } - } - - /** - * Create reference file(s) of merging regions under the region_a merges dir - * @param hstoreFilesOfRegionA - * @param hstoreFilesOfRegionB - * @throws IOException - */ - private void mergeStoreFiles( - Map<byte[], List<StoreFile>> hstoreFilesOfRegionA, - Map<byte[], List<StoreFile>> hstoreFilesOfRegionB) - throws IOException { - // Create reference file(s) of region A in mergedir - HRegionFileSystem fs_a = this.region_a.getRegionFileSystem(); - for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionA - .entrySet()) { - String familyName = Bytes.toString(entry.getKey()); - for (StoreFile storeFile : entry.getValue()) { - fs_a.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, - this.mergesdir); - } - } - // Create reference file(s) of region B in mergedir - HRegionFileSystem fs_b = this.region_b.getRegionFileSystem(); - for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionB - .entrySet()) { - String familyName = Bytes.toString(entry.getKey()); - for (StoreFile storeFile : entry.getValue()) { - fs_b.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, - this.mergesdir); - } - } - } - - @Override - public boolean rollback(final Server server, - final RegionServerServices services) throws IOException { - if (User.isHBaseSecurityEnabled(region_a.getBaseConf())) { - LOG.warn("Should use execute(Server, RegionServerServices, User)"); - } - return rollback(server, services, null); - } - - @Override - public boolean rollback(final Server server, - final RegionServerServices services, User user) throws IOException { - assert this.mergedRegionInfo != null; - this.server = server; - this.rsServices = services; - // Coprocessor callback - if (rsCoprocessorHost != null) { - rsCoprocessorHost.preRollBackMerge(region_a, region_b, user); - } - - boolean result = true; - ListIterator<JournalEntry> iterator = this.journal - .listIterator(this.journal.size()); - // Iterate in reverse.
- while (iterator.hasPrevious()) { - JournalEntry je = iterator.previous(); - - transition(je.getPhase(), true); - - switch (je.getPhase()) { - - case SET_MERGING: - if (services != null - && !services.reportRegionStateTransition(TransitionCode.MERGE_REVERTED, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - return false; - } - break; - - case CREATED_MERGE_DIR: - this.region_a.writestate.writesEnabled = true; - this.region_b.writestate.writesEnabled = true; - this.region_a.getRegionFileSystem().cleanupMergesDir(); - break; - - case CLOSED_REGION_A: - try { - // So, this returns a seqid but if we just closed and then reopened, - // we should be ok. On close, we flushed using sequenceid obtained - // from hosting regionserver so no need to propagate the sequenceid - // returned out of initialize below up into regionserver as we - // normally do. - this.region_a.initialize(); - } catch (IOException e) { - LOG.error("Failed rollbacking CLOSED_REGION_A of region " - + region_a.getRegionInfo().getRegionNameAsString(), e); - throw new RuntimeException(e); - } - break; - - case OFFLINED_REGION_A: - if (services != null) - services.addToOnlineRegions(this.region_a); - break; - - case CLOSED_REGION_B: - try { - this.region_b.initialize(); - } catch (IOException e) { - LOG.error("Failed rollbacking CLOSED_REGION_A of region " - + region_b.getRegionInfo().getRegionNameAsString(), e); - throw new RuntimeException(e); - } - break; - - case OFFLINED_REGION_B: - if (services != null) - services.addToOnlineRegions(this.region_b); - break; - - case STARTED_MERGED_REGION_CREATION: - this.region_a.getRegionFileSystem().cleanupMergedRegion( - this.mergedRegionInfo); - break; - - case PONR: - // We got to the point-of-no-return so we need to just abort. Return - // immediately. Do not clean up created merged regions. 
- return false; - - // Informational states only - case STARTED: - case PREPARED: - case COMPLETED: - break; - - default: - throw new RuntimeException("Unhandled journal entry: " + je); - } - } - // Coprocessor callback - if (rsCoprocessorHost != null) { - rsCoprocessorHost.postRollBackMerge(region_a, region_b, user); - } - - return result; - } - - @Override - public HRegionInfo getMergedRegionInfo() { - return this.mergedRegionInfo; - } - - @VisibleForTesting - Path getMergesDir() { - return this.mergesdir; - } - - /** - * Checks if the given region has merge qualifier in hbase:meta - * @param services - * @param regionName name of specified region - * @return true if the given region has merge qualifier in META.(It will be - * cleaned by CatalogJanitor) - * @throws IOException - */ - @VisibleForTesting - boolean hasMergeQualifierInMeta(final RegionServerServices services, final byte[] regionName) - throws IOException { - if (services == null) return false; - // Get merge regions if it is a merged region and already has merge - // qualifier - Pair mergeRegions = MetaTableAccessor - .getRegionsFromMergeQualifier(services.getConnection(), regionName); - if (mergeRegions != null && - (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) { - // It has merge qualifier - return true; - } - return false; - } - - @Override - public List getJournal() { - return journal; - } - - @Override - public RegionMergeTransaction registerTransactionListener(TransactionListener listener) { - listeners.add(listener); - return this; - } - - @Override - public Server getServer() { - return server; - } - - @Override - public RegionServerServices getRegionServerServices() { - return rsServices; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java deleted file mode 100644 index 307568c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.util; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Random; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotDisabledException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.ipc.RemoteException; - -/** - * A non-instantiable class that has a static method capable of compacting - * a table by merging adjacent regions. - */ -@InterfaceAudience.Private -class HMerge { - // TODO: Where is this class used? How does it relate to Merge in same package? - private static final Log LOG = LogFactory.getLog(HMerge.class); - static final Random rand = new Random(); - - /* - * Not instantiable - */ - private HMerge() { - super(); - } - - /** - * Scans the table and merges two adjacent regions if they are small. This - * only happens when a lot of rows are deleted. - * - * When merging the hbase:meta region, the HBase instance must be offline. - * When merging a normal table, the HBase instance must be online, but the - * table must be disabled. - * - * @param conf - configuration object for HBase - * @param fs - FileSystem where regions reside - * @param tableName - Table to be compacted - * @throws IOException - */ - public static void merge(Configuration conf, FileSystem fs, - final TableName tableName) - throws IOException { - merge(conf, fs, tableName, true); - } - - /** - * Scans the table and merges two adjacent regions if they are small. This - * only happens when a lot of rows are deleted. - * - * When merging the hbase:meta region, the HBase instance must be offline. - * When merging a normal table, the HBase instance must be online, but the - * table must be disabled. 
- * - * @param conf - configuration object for HBase - * @param fs - FileSystem where regions reside - * @param tableName - Table to be compacted - * @param testMasterRunning True if we are to verify master is down before - * running merge - * @throws IOException - */ - public static void merge(Configuration conf, FileSystem fs, - final TableName tableName, final boolean testMasterRunning) - throws IOException { - boolean masterIsRunning = false; - ClusterConnection hConnection = null; - if (testMasterRunning) { - try { - hConnection = (ClusterConnection) ConnectionFactory.createConnection(conf); - masterIsRunning = hConnection.isMasterRunning(); - } finally { - if (hConnection != null) { - hConnection.close(); - } - } - } - if (tableName.equals(TableName.META_TABLE_NAME)) { - if (masterIsRunning) { - throw new IllegalStateException( - "Can not compact hbase:meta table if instance is on-line"); - } - // TODO reenable new OfflineMerger(conf, fs).process(); - } else { - if(!masterIsRunning) { - throw new IllegalStateException( - "HBase instance must be running to merge a normal table"); - } - try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()) { - if (!admin.isTableDisabled(tableName)) { - throw new TableNotDisabledException(tableName); - } - } - new OnlineMerger(conf, fs, tableName).process(); - } - } - - private static abstract class Merger { - protected final Configuration conf; - protected final FileSystem fs; - protected final Path rootDir; - protected final HTableDescriptor htd; - protected final WALFactory walFactory; - private final long maxFilesize; - - - protected Merger(Configuration conf, FileSystem fs, final TableName tableName) - throws IOException { - this.conf = conf; - this.fs = fs; - this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE); - - this.rootDir = FSUtils.getRootDir(conf); - Path tabledir = FSUtils.getTableDir(this.rootDir, tableName); - this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir); - String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME; - - final Configuration walConf = new Configuration(conf); - FSUtils.setRootDir(walConf, tabledir); - this.walFactory = new WALFactory(walConf, null, logname); - } - - void process() throws IOException { - try { - for (HRegionInfo[] regionsToMerge = next(); - regionsToMerge != null; - regionsToMerge = next()) { - if (!merge(regionsToMerge)) { - return; - } - } - } finally { - try { - walFactory.close(); - } catch(IOException e) { - LOG.error(e); - } - } - } - - protected boolean merge(final HRegionInfo[] info) throws IOException { - if (info.length < 2) { - LOG.info("only one region - nothing to merge"); - return false; - } - - HRegion currentRegion = null; - long currentSize = 0; - HRegion nextRegion = null; - long nextSize = 0; - for (int i = 0; i < info.length - 1; i++) { - if (currentRegion == null) { - currentRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i], this.htd, - walFactory.getWAL(info[i].getEncodedNameAsBytes(), - info[i].getTable().getNamespace())); - currentSize = currentRegion.getLargestHStoreSize(); - } - nextRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i + 1], this.htd, - walFactory.getWAL(info[i + 1].getEncodedNameAsBytes(), - info[i + 1].getTable().getNamespace())); - nextSize = nextRegion.getLargestHStoreSize(); - - if ((currentSize + nextSize) <= (maxFilesize / 2)) { - // We merge two adjacent regions if their total size is less 
than - // one half of the desired maximum size - LOG.info("Merging regions " + currentRegion.getRegionInfo().getRegionNameAsString() + - " and " + nextRegion.getRegionInfo().getRegionNameAsString()); - HRegion mergedRegion = - HRegion.mergeAdjacent(currentRegion, nextRegion); - updateMeta(currentRegion.getRegionInfo().getRegionName(), - nextRegion.getRegionInfo().getRegionName(), mergedRegion); - break; - } - LOG.info("not merging regions " + - Bytes.toStringBinary(currentRegion.getRegionInfo().getRegionName()) + - " and " + Bytes.toStringBinary(nextRegion.getRegionInfo().getRegionName())); - currentRegion.close(); - currentRegion = nextRegion; - currentSize = nextSize; - } - if(currentRegion != null) { - currentRegion.close(); - } - return true; - } - - protected abstract HRegionInfo[] next() throws IOException; - - protected abstract void updateMeta(final byte [] oldRegion1, - final byte [] oldRegion2, HRegion newRegion) - throws IOException; - - } - - /** Instantiated to compact a normal user table */ - private static class OnlineMerger extends Merger { - private final TableName tableName; - private final Table table; - private final ResultScanner metaScanner; - private HRegionInfo latestRegion; - - OnlineMerger(Configuration conf, FileSystem fs, - final TableName tableName) - throws IOException { - super(conf, fs, tableName); - this.tableName = tableName; - Connection connection = ConnectionFactory.createConnection(conf); - this.table = connection.getTable(TableName.META_TABLE_NAME); - this.metaScanner = table.getScanner(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - this.latestRegion = null; - } - - private HRegionInfo nextRegion() throws IOException { - try { - Result results = getMetaRow(); - if (results == null) { - return null; - } - HRegionInfo region = MetaTableAccessor.getHRegionInfo(results); - if (region == null) { - throw new NoSuchElementException("meta region entry missing " + - Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + - Bytes.toString(HConstants.REGIONINFO_QUALIFIER)); - } - if (!region.getTable().equals(this.tableName)) { - return null; - } - return region; - } catch (IOException e) { - e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e; - LOG.error("meta scanner error", e); - metaScanner.close(); - throw e; - } - } - - /* - * Check current row has a HRegionInfo. Skip to next row if HRI is empty. - * @return A Map of the row content else null if we are off the end. - * @throws IOException - */ - private Result getMetaRow() throws IOException { - Result currentRow = metaScanner.next(); - boolean foundResult = false; - while (currentRow != null) { - LOG.info("Row: <" + Bytes.toStringBinary(currentRow.getRow()) + ">"); - byte[] regionInfoValue = currentRow.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (regionInfoValue == null || regionInfoValue.length == 0) { - currentRow = metaScanner.next(); - continue; - } - HRegionInfo region = MetaTableAccessor.getHRegionInfo(currentRow); - if (!region.getTable().equals(this.tableName)) { - currentRow = metaScanner.next(); - continue; - } - foundResult = true; - break; - } - return foundResult ? 
currentRow : null; - } - - @Override - protected HRegionInfo[] next() throws IOException { - List regions = new ArrayList(); - if(latestRegion == null) { - latestRegion = nextRegion(); - } - if(latestRegion != null) { - regions.add(latestRegion); - } - latestRegion = nextRegion(); - if(latestRegion != null) { - regions.add(latestRegion); - } - return regions.toArray(new HRegionInfo[regions.size()]); - } - - @Override - protected void updateMeta(final byte [] oldRegion1, - final byte [] oldRegion2, - HRegion newRegion) - throws IOException { - byte[][] regionsToDelete = {oldRegion1, oldRegion2}; - for (int r = 0; r < regionsToDelete.length; r++) { - if(Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) { - latestRegion = null; - } - Delete delete = new Delete(regionsToDelete[r]); - table.delete(delete); - if(LOG.isDebugEnabled()) { - LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r])); - } - } - newRegion.getRegionInfo().setOffline(true); - - MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo()); - - if(LOG.isDebugEnabled()) { - LOG.debug("updated columns in row: " - + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName())); - } - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java deleted file mode 100644 index 7b96660..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ /dev/null @@ -1,264 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.util; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.io.WritableComparator; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - -import com.google.common.base.Preconditions; - -/** - * Utility that can merge any two regions in the same table: adjacent, - * overlapping or disjoint. - */ -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) -public class Merge extends Configured implements Tool { - private static final Log LOG = LogFactory.getLog(Merge.class); - private Path rootdir; - private volatile MetaUtils utils; - private TableName tableName; // Name of table - private volatile byte [] region1; // Name of region 1 - private volatile byte [] region2; // Name of region 2 - private volatile HRegionInfo mergeInfo = null; - - @Override - public int run(String[] args) throws Exception { - if (parseArgs(args) != 0) { - return -1; - } - - // Verify file system is up. - FileSystem fs = FileSystem.get(getConf()); // get DFS handle - LOG.info("Verifying that file system is available..."); - try { - FSUtils.checkFileSystemAvailable(fs); - } catch (IOException e) { - LOG.fatal("File system is not available", e); - return -1; - } - - // Verify HBase is down - LOG.info("Verifying that HBase is not running..."); - try { - HBaseAdmin.available(getConf()); - LOG.fatal("HBase cluster must be off-line, and is not. Aborting."); - return -1; - } catch (ZooKeeperConnectionException zkce) { - // If no zk, presume no master. - } - - // Initialize MetaUtils and and get the root of the HBase installation - - this.utils = new MetaUtils(getConf()); - this.rootdir = FSUtils.getRootDir(getConf()); - try { - mergeTwoRegions(); - return 0; - } catch (IOException e) { - LOG.fatal("Merge failed", e); - return -1; - - } finally { - if (this.utils != null) { - this.utils.shutdown(); - } - } - } - - /** @return HRegionInfo for merge result */ - HRegionInfo getMergedHRegionInfo() { - return this.mergeInfo; - } - - /* - * Merges two regions from a user table. 
- */ - private void mergeTwoRegions() throws IOException { - LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " + - Bytes.toStringBinary(this.region2) + " in table " + this.tableName); - HRegion meta = this.utils.getMetaRegion(); - Get get = new Get(region1); - get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - Result result1 = meta.get(get); - Preconditions.checkState(!result1.isEmpty(), - "First region cells can not be null"); - HRegionInfo info1 = MetaTableAccessor.getHRegionInfo(result1); - if (info1 == null) { - throw new NullPointerException("info1 is null using key " + - Bytes.toStringBinary(region1) + " in " + meta); - } - get = new Get(region2); - get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - Result result2 = meta.get(get); - Preconditions.checkState(!result2.isEmpty(), - "Second region cells can not be null"); - HRegionInfo info2 = MetaTableAccessor.getHRegionInfo(result2); - if (info2 == null) { - throw new NullPointerException("info2 is null using key " + meta); - } - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()), - this.rootdir, this.tableName); - HRegion merged = merge(htd, meta, info1, info2); - - LOG.info("Adding " + merged.getRegionInfo() + " to " + - meta.getRegionInfo()); - - HRegion.addRegionToMETA(meta, merged); - merged.close(); - } - - /* - * Actually merge two regions and update their info in the meta region(s) - * Returns HRegion object for newly merged region - */ - private HRegion merge(final HTableDescriptor htd, HRegion meta, - HRegionInfo info1, HRegionInfo info2) - throws IOException { - if (info1 == null) { - throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " + - Bytes.toStringBinary(meta.getRegionInfo().getRegionName())); - } - if (info2 == null) { - throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " + - Bytes.toStringBinary(meta.getRegionInfo().getRegionName())); - } - HRegion merged = null; - HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf()); - try { - HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf()); - try { - merged = HRegion.merge(r1, r2); - } finally { - if (!r2.isClosed()) { - r2.close(); - } - } - } finally { - if (!r1.isClosed()) { - r1.close(); - } - } - - // Remove the old regions from meta. - // HRegion.merge has already deleted their files - - removeRegionFromMeta(meta, info1); - removeRegionFromMeta(meta, info2); - - this.mergeInfo = merged.getRegionInfo(); - return merged; - } - - /* - * Removes a region's meta information from the passed meta - * region. - * - * @param meta hbase:meta HRegion to be updated - * @param regioninfo HRegionInfo of region to remove from meta - * - * @throws IOException - */ - private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Removing region: " + regioninfo + " from " + meta); - } - - Delete delete = new Delete(regioninfo.getRegionName(), - System.currentTimeMillis()); - meta.delete(delete); - } - - /** - * Parse given arguments and assign table name and regions names. - * (generic args are handled by ToolRunner.) 
-   *
-   * @param args the arguments to parse
-   *
-   * @throws IOException
-   */
-  private int parseArgs(String[] args) throws IOException {
-    if (args.length != 3) {
-      usage();
-      return -1;
-    }
-    tableName = TableName.valueOf(args[0]);
-
-    region1 = Bytes.toBytesBinary(args[1]);
-    region2 = Bytes.toBytesBinary(args[2]);
-    int status = 0;
-    if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
-      status = -1;
-    } else if (Bytes.equals(region1, region2)) {
-      LOG.error("Can't merge a region with itself");
-      status = -1;
-    }
-    return status;
-  }
-
-  private boolean notInTable(final TableName tn, final byte [] rn) {
-    if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
-        rn, 0, tn.getName().length) != 0) {
-      LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
-        tn);
-      return true;
-    }
-    return false;
-  }
-
-  private void usage() {
-    System.err
-        .println("For hadoop 0.21+, Usage: hbase org.apache.hadoop.hbase.util.Merge "
-            + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
-  }
-
-  public static void main(String[] args) {
-    int status;
-    try {
-      status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
-    } catch (Exception e) {
-      LOG.error("exiting due to error", e);
-      status = -1;
-    }
-    System.exit(status);
-  }
-}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 65cedda..b138c7d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -1189,15 +1189,17 @@ public class TestAdmin1 {
     assertTrue(gotException);
     // Try going to the master directly (that will skip the check in admin)
     try {
-      DispatchMergingRegionsRequest request = RequestConverter
-          .buildDispatchMergingRegionsRequest(
-              regions.get(1).getFirst().getEncodedNameAsBytes(),
-              regions.get(2).getFirst().getEncodedNameAsBytes(),
+      byte[][] nameofRegionsToMerge = new byte[2][];
+      nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
+      nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
+      MergeTableRegionsRequest request = RequestConverter
+          .buildMergeTableRegionsRequest(
+              nameofRegionsToMerge,
               true,
               HConstants.NO_NONCE,
-              HConstants.NO_NONCE);
+          HConstants.NO_NONCE);
       ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster()
-        .dispatchMergingRegions(null, request);
+        .mergeTableRegions(null, request);
     } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException m) {
       Throwable t = m.getCause();
       do {
@@ -1209,40 +1211,6 @@
       } while (t != null);
     }
     assertTrue(gotException);
-    gotException = false;
-    // Try going to the regionservers directly
-    // first move the region to the same regionserver
-    if
(!regions.get(2).getSecond().equals(regions.get(1).getSecond())) { - moveRegionAndWait(regions.get(2).getFirst(), regions.get(1).getSecond()); - } - try { - AdminService.BlockingInterface admin = ((ClusterConnection) TEST_UTIL.getAdmin() - .getConnection()).getAdmin(regions.get(1).getSecond()); - ProtobufUtil.mergeRegions(null, admin, regions.get(1).getFirst(), regions.get(2).getFirst(), - true, null); - } catch (MergeRegionException mm) { - gotException = true; - } - assertTrue(gotException); - } - - private void moveRegionAndWait(HRegionInfo destRegion, ServerName destServer) - throws InterruptedException, MasterNotRunningException, - ZooKeeperConnectionException, IOException { - HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - TEST_UTIL.getAdmin().move( - destRegion.getEncodedNameAsBytes(), - Bytes.toBytes(destServer.getServerName())); - while (true) { - ServerName serverName = master.getAssignmentManager() - .getRegionStates().getRegionServerOfRegion(destRegion); - if (serverName != null && serverName.equals(destServer)) { - TEST_UTIL.assertRegionOnServer( - destRegion, serverName, 200); - break; - } - Thread.sleep(10); - } } @Test (expected=IllegalArgumentException.class, timeout=300000) diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java deleted file mode 100644 index 4a62bff..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.coprocessor; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.RegionMergeTransactionFactory; -import org.apache.hadoop.hbase.regionserver.RegionMergeTransactionImpl; -import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; -import org.apache.hadoop.hbase.testclassification.CoprocessorTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.RegionServerObserver} - * interface hooks at all appropriate times during normal HMaster operations. - */ -@Category({CoprocessorTests.class, MediumTests.class}) -public class TestRegionServerObserver { - private static final Log LOG = LogFactory.getLog(TestRegionServerObserver.class); - - /** - * Test verifies the hooks in regions merge. 
- * @throws Exception - */ - @Ignore - @Test - public void testCoprocessorHooksInRegionsMerge() throws Exception { - final int NUM_MASTERS = 1; - final int NUM_RS = 1; - final String TABLENAME = "testRegionServerObserver"; - final String TABLENAME2 = "testRegionServerObserver_2"; - final byte[] FAM = Bytes.toBytes("fam"); - - // Create config to use for this cluster - Configuration conf = HBaseConfiguration.create(); - conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class, - RegionServerObserver.class); - - // Start the cluster - HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf); - TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); - Admin admin = TEST_UTIL.getHBaseAdmin(); - try { - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - HRegionServer regionServer = cluster.getRegionServer(0); - RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost(); - Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName()); - CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor; - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME)); - desc.addFamily(new HColumnDescriptor(FAM)); - admin.createTable(desc, new byte[][] { Bytes.toBytes("row") }); - desc = new HTableDescriptor(TableName.valueOf(TABLENAME2)); - desc.addFamily(new HColumnDescriptor(FAM)); - admin.createTable(desc, new byte[][] { Bytes.toBytes("row") }); - assertFalse(regionServerObserver.wasRegionMergeCalled()); - List regions = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)); - admin.mergeRegionsAsync( - regions.get(0).getRegionInfo().getEncodedNameAsBytes(), - regions.get(1).getRegionInfo().getEncodedNameAsBytes(), - true); - int regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size(); - while (regionsCount != 1) { - regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size(); - Thread.sleep(1000); - } - assertTrue(regionServerObserver.wasRegionMergeCalled()); - assertTrue(regionServerObserver.wasPreMergeCommit()); - assertTrue(regionServerObserver.wasPostMergeCommit()); - assertEquals(regionsCount, 1); - assertEquals(regionServer.getOnlineRegions(TableName.valueOf(TABLENAME2)).size(), 1); - } finally { - if (admin != null) admin.close(); - TEST_UTIL.shutdownMiniCluster(); - } - } - - public static class CPRegionServerObserver extends BaseRegionServerObserver { - private RegionMergeTransactionImpl rmt = null; - private HRegion mergedRegion = null; - - private boolean preMergeCalled; - private boolean preMergeBeforePONRCalled; - private boolean preMergeAfterPONRCalled; - private boolean preRollBackMergeCalled; - private boolean postRollBackMergeCalled; - private boolean postMergeCalled; - - public void resetStates() { - preMergeCalled = false; - preMergeBeforePONRCalled = false; - preMergeAfterPONRCalled = false; - preRollBackMergeCalled = false; - postRollBackMergeCalled = false; - postMergeCalled = false; - } - - @Override - public void preMerge(ObserverContext ctx, Region regionA, - Region regionB) throws IOException { - preMergeCalled = true; - } - - @Override - public void preMergeCommit(ObserverContext ctx, - Region regionA, Region regionB, List metaEntries) throws IOException { - preMergeBeforePONRCalled = true; - RegionServerCoprocessorEnvironment environment = ctx.getEnvironment(); - HRegionServer rs = (HRegionServer) environment.getRegionServerServices(); - List onlineRegions = - 
rs.getOnlineRegions(TableName.valueOf("testRegionServerObserver_2")); - rmt = (RegionMergeTransactionImpl) new RegionMergeTransactionFactory(rs.getConfiguration()) - .create(onlineRegions.get(0), onlineRegions.get(1), true); - if (!rmt.prepare(rs)) { - LOG.error("Prepare for the region merge of table " - + onlineRegions.get(0).getTableDesc().getNameAsString() - + " failed. So returning null. "); - ctx.bypass(); - return; - } - mergedRegion = rmt.stepsBeforePONR(rs, rs, false); - rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(), - regionB.getRegionInfo(), rs.getServerName(), metaEntries); - MetaTableAccessor.mutateMetaTable(rs.getConnection(), metaEntries); - } - - @Override - public void postMergeCommit(ObserverContext ctx, - Region regionA, Region regionB, Region mr) throws IOException { - preMergeAfterPONRCalled = true; - RegionServerCoprocessorEnvironment environment = ctx.getEnvironment(); - HRegionServer rs = (HRegionServer) environment.getRegionServerServices(); - rmt.stepsAfterPONR(rs, rs, this.mergedRegion, null); - } - - @Override - public void preRollBackMerge(ObserverContext ctx, - Region regionA, Region regionB) throws IOException { - preRollBackMergeCalled = true; - } - - @Override - public void postRollBackMerge(ObserverContext ctx, - Region regionA, Region regionB) throws IOException { - postRollBackMergeCalled = true; - } - - @Override - public void postMerge(ObserverContext c, Region regionA, - Region regionB, Region mergedRegion) throws IOException { - postMergeCalled = true; - } - - public boolean wasPreMergeCalled() { - return this.preMergeCalled; - } - - public boolean wasPostMergeCalled() { - return this.postMergeCalled; - } - - public boolean wasPreMergeCommit() { - return this.preMergeBeforePONRCalled; - } - - public boolean wasPostMergeCommit() { - return this.preMergeAfterPONRCalled; - } - - public boolean wasPreRollBackMerge() { - return this.preRollBackMergeCalled; - } - - public boolean wasPostRollBackMerge() { - return this.postRollBackMergeCalled; - } - - public boolean wasRegionMergeCalled() { - return this.preMergeCalled && this.postMergeCalled; - } - - } - -} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 28bf14a..48cf8a5 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -320,16 +320,6 @@ public class MockNoopMasterServices implements MasterServices, Server { } @Override - public long dispatchMergingRegions( - final HRegionInfo region_a, - final HRegionInfo region_b, - final boolean forcible, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - @Override public boolean isActiveMaster() { return true; } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 950ec92..c5f294a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -68,8 +68,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; @@ -531,13 +529,6 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public MergeRegionsResponse mergeRegions(RpcController controller, - MergeRegionsRequest request) throws ServiceException { - // TODO Auto-generated method stub - return null; - } - - @Override public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request) throws ServiceException { // TODO Auto-generated method stub diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java deleted file mode 100644 index c011321..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java +++ /dev/null @@ -1,264 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsState;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-@Category({MasterTests.class, MediumTests.class})
-public class TestDispatchMergingRegionsProcedure {
-  private static final Log LOG = LogFactory.getLog(TestDispatchMergingRegionsProcedure.class);
-
-  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
-  final static Configuration conf = UTIL.getConfiguration();
-  private static Admin admin;
-
-  private static void setupConf(Configuration conf) {
-    // Reduce the maximum attempts to speed up the test
-    conf.setInt("hbase.assignment.maximum.attempts", 3);
-    conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
-    conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);
-
-    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    setupConf(conf);
-    UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
-  }
-
-  @AfterClass
-  public static void cleanupTest() throws Exception {
-    try {
-      UTIL.shutdownMiniCluster();
-    } catch (Exception e) {
-      LOG.warn("failure shutting down cluster", e);
-    }
-  }
-
-  @Before
-  public void setup() throws Exception {
-    resetProcExecutorTestingKillFlag();
-
-    // Turn off balancer so it doesn't cut in and mess up our placements.
-    UTIL.getHBaseAdmin().setBalancerRunning(false, true);
-    // Turn off the meta scanner so it doesn't remove the parent on us.
-    UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false);
-    resetProcExecutorTestingKillFlag();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    resetProcExecutorTestingKillFlag();
-    for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
-      LOG.info("Tear down, remove table=" + htd.getTableName());
-      UTIL.deleteTable(htd.getTableName());
-    }
-  }
-
-  private void resetProcExecutorTestingKillFlag() {
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
-    assertTrue("expected executor to be running", procExec.isRunning());
-  }
-
-  /**
-   * This tests a merge of two regions
-   */
-  @Test(timeout=60000)
-  public void testMergeTwoRegions() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeTwoRegions");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName, 3);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    final int initCompletedTaskCount = countOfCompletedMergeTaskCount();
-    long procId = procExec.submitProcedure(new DispatchMergingRegionsProcedure(
-      procExec.getEnvironment(), tableName, regionsToMerge, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-
-    assertRegionCount(tableName, 2, 1, initCompletedTaskCount);
-  }
-
-  /**
-   * This tests two concurrent region merges
-   */
-  @Test(timeout=60000)
-  public void testMergeRegionsConcurrently() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeRegionsConcurrently");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName, 4);
-
-    HRegionInfo[] regionsToMerge1 = new HRegionInfo[2];
-    HRegionInfo[] regionsToMerge2 = new HRegionInfo[2];
-    regionsToMerge1[0] = tableRegions.get(0);
-    regionsToMerge1[1] = tableRegions.get(1);
-    regionsToMerge2[0] = tableRegions.get(2);
-    regionsToMerge2[1] = tableRegions.get(3);
-
-    final int initCompletedTaskCount = countOfCompletedMergeTaskCount();
-    long procId1 = procExec.submitProcedure(new DispatchMergingRegionsProcedure(
-      procExec.getEnvironment(), tableName, regionsToMerge1, true));
-    long procId2 = procExec.submitProcedure(new DispatchMergingRegionsProcedure(
-      procExec.getEnvironment(), tableName, regionsToMerge2, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId1);
-    ProcedureTestingUtility.waitProcedure(procExec, procId2);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
-    assertRegionCount(tableName, 2, 2, initCompletedTaskCount);
-  }
-
-  private void waitForCompletedMergeTask(int expectedTaskCount, int initCompletedTaskCount)
-      throws IOException, InterruptedException {
-    while (true) {
-      long currentCompletedTaskCount = countOfCompletedMergeTaskCount() - initCompletedTaskCount;
-      if (currentCompletedTaskCount == expectedTaskCount) {
-        return;
-      }
-      LOG.info("There are " + (expectedTaskCount - currentCompletedTaskCount) +
-        " merge requests not yet completed; waiting 100 ms");
-      TimeUnit.MILLISECONDS.sleep(100);
-    }
-  }
-
-  private static int countOfCompletedMergeTaskCount() {
-    int completedTaskCount = 0;
-    for (RegionServerThread server : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
-      completedTaskCount +=
server.getRegionServer().getCompactSplitThread().getCompletedMergeTaskCount(); - } - return completedTaskCount; - } - - @Test(timeout=60000) - public void testRecoveryAndDoubleExecution() throws Exception { - final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution"); - final ProcedureExecutor procExec = getMasterProcedureExecutor(); - - List tableRegions = createTable(tableName, 3); - - ProcedureTestingUtility.waitNoProcedureRunning(procExec); - ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); - - HRegionInfo[] regionsToMerge = new HRegionInfo[2]; - regionsToMerge[0] = tableRegions.get(0); - regionsToMerge[1] = tableRegions.get(1); - - final int initCompletedTaskCount = countOfCompletedMergeTaskCount(); - long procId = procExec.submitProcedure( - new DispatchMergingRegionsProcedure( - procExec.getEnvironment(), tableName, regionsToMerge, true)); - - // Restart the executor and execute the step twice - int numberOfSteps = DispatchMergingRegionsState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); - ProcedureTestingUtility.assertProcNotFailed(procExec, procId); - - assertRegionCount(tableName, 2, 1, initCompletedTaskCount); - } - - @Test(timeout = 60000) - public void testRollbackAndDoubleExecution() throws Exception { - final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); - final ProcedureExecutor procExec = getMasterProcedureExecutor(); - - List tableRegions = createTable(tableName, 3); - - ProcedureTestingUtility.waitNoProcedureRunning(procExec); - ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); - - HRegionInfo[] regionsToMerge = new HRegionInfo[2]; - regionsToMerge[0] = tableRegions.get(0); - regionsToMerge[1] = tableRegions.get(1); - - long procId = procExec.submitProcedure( - new DispatchMergingRegionsProcedure( - procExec.getEnvironment(), tableName, regionsToMerge, true)); - - int numberOfSteps = DispatchMergingRegionsState.values().length - 3; - MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); - } - - private List createTable(final TableName tableName, final int nregions) - throws Exception { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - byte[][] splitRows = new byte[nregions - 1][]; - for (int i = 0; i < splitRows.length; ++i) { - splitRows[i] = Bytes.toBytes(String.format("%d", i)); - } - admin.createTable(desc, splitRows); - return assertRegionCount(tableName, nregions); - } - - public List assertRegionCount(final TableName tableName, final int nregions) - throws Exception { - UTIL.waitUntilNoRegionsInTransition(); - List tableRegions = admin.getTableRegions(tableName); - assertEquals(nregions, tableRegions.size()); - return tableRegions; - } - - public List assertRegionCount(final TableName tableName, final int nregions, - int expectedTaskCount, int initCompletedTaskCount) throws Exception { - waitForCompletedMergeTask(expectedTaskCount, initCompletedTaskCount); - return assertRegionCount(tableName, nregions); - } - - private ProcedureExecutor getMasterProcedureExecutor() { - return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); - } -} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index a63fec6..c973471 100644 --- 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -2646,55 +2646,6 @@ public class TestHRegion { } } - // //////////////////////////////////////////////////////////////////////////// - // Merge test - // //////////////////////////////////////////////////////////////////////////// - @Test - public void testMerge() throws IOException { - byte[][] families = { fam1, fam2, fam3 }; - Configuration hc = initSplit(); - // Setting up region - this.region = initHRegion(tableName, method, hc, families); - try { - LOG.info("" + HBaseTestCase.addContent(region, fam3)); - region.flush(true); - region.compactStores(); - byte[] splitRow = region.checkSplit(); - assertNotNull(splitRow); - LOG.info("SplitRow: " + Bytes.toString(splitRow)); - HRegion[] subregions = splitRegion(region, splitRow); - try { - // Need to open the regions. - for (int i = 0; i < subregions.length; i++) { - HRegion.openHRegion(subregions[i], null); - subregions[i].compactStores(); - } - Path oldRegionPath = region.getRegionFileSystem().getRegionDir(); - Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir(); - Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir(); - long startTime = System.currentTimeMillis(); - region = HRegion.mergeAdjacent(subregions[0], subregions[1]); - LOG.info("Merge regions elapsed time: " - + ((System.currentTimeMillis() - startTime) / 1000.0)); - FILESYSTEM.delete(oldRegion1, true); - FILESYSTEM.delete(oldRegion2, true); - FILESYSTEM.delete(oldRegionPath, true); - LOG.info("splitAndMerge completed."); - } finally { - for (int i = 0; i < subregions.length; i++) { - try { - HBaseTestingUtility.closeRegionAndWAL(subregions[i]); - } catch (IOException e) { - // Ignore. - } - } - } - } finally { - HBaseTestingUtility.closeRegionAndWAL(this.region); - this.region = null; - } - } - /** * @param parent * Region to split. diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java deleted file mode 100644 index 5bd2ff1..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java +++ /dev/null @@ -1,485 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hbase.regionserver; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.*; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.zookeeper.KeeperException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.mockito.Mockito; - -import com.google.common.collect.ImmutableList; - -/** - * Test the {@link RegionMergeTransactionImpl} class against two HRegions (as - * opposed to running cluster). - */ -@Category({RegionServerTests.class, SmallTests.class}) -public class TestRegionMergeTransaction { - private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final Path testdir = TEST_UTIL.getDataTestDir(this.getClass() - .getName()); - private HRegion region_a; - private HRegion region_b; - private HRegion region_c; - private WALFactory wals; - private FileSystem fs; - // Start rows of region_a,region_b,region_c - private static final byte[] STARTROW_A = new byte[] { 'a', 'a', 'a' }; - private static final byte[] STARTROW_B = new byte[] { 'g', 'g', 'g' }; - private static final byte[] STARTROW_C = new byte[] { 'w', 'w', 'w' }; - private static final byte[] ENDROW = new byte[] { '{', '{', '{' }; - private static final byte[] CF = HConstants.CATALOG_FAMILY; - - @Before - public void setup() throws IOException { - this.fs = FileSystem.get(TEST_UTIL.getConfiguration()); - this.fs.delete(this.testdir, true); - final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration()); - FSUtils.setRootDir(walConf, this.testdir); - this.wals = new WALFactory(walConf, null, TestRegionMergeTransaction.class.getName()); - this.region_a = createRegion(this.testdir, this.wals, STARTROW_A, STARTROW_B); - this.region_b = createRegion(this.testdir, this.wals, STARTROW_B, STARTROW_C); - this.region_c = createRegion(this.testdir, this.wals, STARTROW_C, ENDROW); - assert region_a != null && region_b != null && region_c != null; - TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true); - } - - @After - public void teardown() throws IOException { - for (HRegion region : new HRegion[] { region_a, region_b, region_c }) { - if (region != null && !region.isClosed()) region.close(); - if (this.fs.exists(region.getRegionFileSystem().getRegionDir()) - && !this.fs.delete(region.getRegionFileSystem().getRegionDir(), true)) { - throw new IOException("Failed deleting of " - + region.getRegionFileSystem().getRegionDir()); - } - } - 
if (this.wals != null) {
-      this.wals.close();
-    }
-    this.fs.delete(this.testdir, true);
-  }
-
-  /**
-   * Test straight prepare works. Tries to merge on {@link #region_a} and
-   * {@link #region_b}
-   * @throws IOException
-   */
-  @Test
-  public void testPrepare() throws IOException {
-    prepareOnGoodRegions();
-  }
-
-  private RegionMergeTransactionImpl prepareOnGoodRegions() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b,
-        false);
-    RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_a.getRegionInfo().getRegionName());
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_b.getRegionInfo().getRegionName());
-    assertTrue(spyMT.prepare(null));
-    return spyMT;
-  }
-
-  /**
-   * Test merging the same region
-   */
-  @Test
-  public void testPrepareWithSameRegion() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_a, true);
-    assertFalse("should not merge the same region even if it is forcible",
-        mt.prepare(null));
-  }
-
-  /**
-   * Test merging two non-adjacent regions under a non-forcible merge
-   */
-  @Test
-  public void testPrepareWithRegionsNotAdjacent() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_c, false);
-    assertFalse("should not merge two non-adjacent regions unless the merge is forcible",
-        mt.prepare(null));
-  }
-
-  /**
-   * Test merging two non-adjacent regions under a compulsory merge
-   */
-  @Test
-  public void testPrepareWithRegionsNotAdjacentUnderCompulsory()
-      throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_c,
-        true);
-    RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_a.getRegionInfo().getRegionName());
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_c.getRegionInfo().getRegionName());
-    assertTrue("Since forcible is true, should merge two regions even if they are not adjacent",
-        spyMT.prepare(null));
-  }
-
-  /**
-   * Pass a reference store
-   */
-  @Test
-  public void testPrepareWithRegionsWithReference() throws IOException {
-    HStore storeMock = Mockito.mock(HStore.class);
-    when(storeMock.hasReferences()).thenReturn(true);
-    when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
-    when(storeMock.getSizeToFlush()).thenReturn(new MemstoreSize());
-    when(storeMock.close()).thenReturn(ImmutableList.of());
-    this.region_a.stores.put(Bytes.toBytes(""), storeMock);
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_b, false);
-    assertFalse(
-        "a region should not be mergeable if it has instances of store file references",
-        mt.prepare(null));
-  }
-
-  @Test
-  public void testPrepareWithClosedRegion() throws IOException {
-    this.region_a.close();
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_b, false);
-    assertFalse(mt.prepare(null));
-  }
-
-  /**
-   * Test merging regions that are themselves merged regions and still have
-   * merge references in hbase:meta
-   */
-  @Test
-  public void testPrepareWithRegionsWithMergeReference() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b,
-        false);
-    RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
-    doReturn(true).when(spyMT).hasMergeQualifierInMeta(null,
-        region_a.getRegionInfo().getRegionName());
- doReturn(true).when(spyMT).hasMergeQualifierInMeta(null, - region_b.getRegionInfo().getRegionName()); - assertFalse(spyMT.prepare(null)); - } - - /** - * Test RegionMergeTransactionListener - */ - @Test public void testRegionMergeTransactionListener() throws IOException { - RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b, - false); - RegionMergeTransactionImpl spyMT = Mockito.spy(mt); - doReturn(false).when(spyMT).hasMergeQualifierInMeta(null, - region_a.getRegionInfo().getRegionName()); - doReturn(false).when(spyMT).hasMergeQualifierInMeta(null, - region_b.getRegionInfo().getRegionName()); - RegionMergeTransaction.TransactionListener listener = - Mockito.mock(RegionMergeTransaction.TransactionListener.class); - mt.registerTransactionListener(listener); - mt.prepare(null); - TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0); - CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager( - TEST_UTIL.getConfiguration()); - Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp); - mt.execute(mockServer, null); - verify(listener).transition(mt, - RegionMergeTransaction.RegionMergeTransactionPhase.STARTED, - RegionMergeTransaction.RegionMergeTransactionPhase.PREPARED); - verify(listener, times(10)).transition(any(RegionMergeTransaction.class), - any(RegionMergeTransaction.RegionMergeTransactionPhase.class), - any(RegionMergeTransaction.RegionMergeTransactionPhase.class)); - verifyNoMoreInteractions(listener); - } - - @Test - public void testWholesomeMerge() throws IOException, InterruptedException { - final int rowCountOfRegionA = loadRegion(this.region_a, CF, true); - final int rowCountOfRegionB = loadRegion(this.region_b, CF, true); - assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0); - assertEquals(rowCountOfRegionA, TEST_UTIL.countRows(this.region_a)); - assertEquals(rowCountOfRegionB, TEST_UTIL.countRows(this.region_b)); - - // Start transaction. - RegionMergeTransactionImpl mt = prepareOnGoodRegions(); - - // Run the execute. Look at what it returns. - TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0); - CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager( - TEST_UTIL.getConfiguration()); - Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp); - HRegion mergedRegion = (HRegion)mt.execute(mockServer, null); - // Do some assertions about execution. - assertTrue(this.fs.exists(mt.getMergesDir())); - // Assert region_a and region_b is closed. - assertTrue(region_a.isClosed()); - assertTrue(region_b.isClosed()); - - // Assert mergedir is empty -- because its content will have been moved out - // to be under the merged region dirs. - assertEquals(0, this.fs.listStatus(mt.getMergesDir()).length); - // Check merged region have correct key span. - assertTrue(Bytes.equals(this.region_a.getRegionInfo().getStartKey(), - mergedRegion.getRegionInfo().getStartKey())); - assertTrue(Bytes.equals(this.region_b.getRegionInfo().getEndKey(), - mergedRegion.getRegionInfo().getEndKey())); - // Count rows. 
merged region are already open - try { - int mergedRegionRowCount = TEST_UTIL.countRows(mergedRegion); - assertEquals((rowCountOfRegionA + rowCountOfRegionB), - mergedRegionRowCount); - } finally { - HBaseTestingUtility.closeRegionAndWAL(mergedRegion); - } - // Assert the write lock is no longer held on region_a and region_b - assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread()); - assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread()); - } - - @Test - public void testRollback() throws IOException, InterruptedException { - final int rowCountOfRegionA = loadRegion(this.region_a, CF, true); - final int rowCountOfRegionB = loadRegion(this.region_b, CF, true); - assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0); - assertEquals(rowCountOfRegionA, TEST_UTIL.countRows(this.region_a)); - assertEquals(rowCountOfRegionB, TEST_UTIL.countRows(this.region_b)); - - // Start transaction. - RegionMergeTransactionImpl mt = prepareOnGoodRegions(); - - when(mt.createMergedRegionFromMerges(region_a, region_b, - mt.getMergedRegionInfo())).thenThrow( - new MockedFailedMergedRegionCreation()); - - // Run the execute. Look at what it returns. - boolean expectedException = false; - TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0); - CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager( - TEST_UTIL.getConfiguration()); - Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp); - try { - mt.execute(mockServer, null); - } catch (MockedFailedMergedRegionCreation e) { - expectedException = true; - } - assertTrue(expectedException); - // Run rollback - assertTrue(mt.rollback(null, null)); - - // Assert I can scan region_a and region_b. - int rowCountOfRegionA2 = TEST_UTIL.countRows(this.region_a); - assertEquals(rowCountOfRegionA, rowCountOfRegionA2); - int rowCountOfRegionB2 = TEST_UTIL.countRows(this.region_b); - assertEquals(rowCountOfRegionB, rowCountOfRegionB2); - - // Assert rollback cleaned up stuff in fs - assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, - mt.getMergedRegionInfo()))); - - assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread()); - assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread()); - - // Now retry the merge but do not throw an exception this time. - assertTrue(mt.prepare(null)); - HRegion mergedRegion = (HRegion)mt.execute(mockServer, null); - // Count rows. daughters are already open - // Count rows. merged region are already open - try { - int mergedRegionRowCount = TEST_UTIL.countRows(mergedRegion); - assertEquals((rowCountOfRegionA + rowCountOfRegionB), - mergedRegionRowCount); - } finally { - HBaseTestingUtility.closeRegionAndWAL(mergedRegion); - } - // Assert the write lock is no longer held on region_a and region_b - assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread()); - assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread()); - } - - @Test - public void testFailAfterPONR() throws IOException, KeeperException, InterruptedException { - final int rowCountOfRegionA = loadRegion(this.region_a, CF, true); - final int rowCountOfRegionB = loadRegion(this.region_b, CF, true); - assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0); - assertEquals(rowCountOfRegionA, TEST_UTIL.countRows(this.region_a)); - assertEquals(rowCountOfRegionB, TEST_UTIL.countRows(this.region_b)); - - // Start transaction. 
- RegionMergeTransactionImpl mt = prepareOnGoodRegions(); - Mockito.doThrow(new MockedFailedMergedRegionOpen()) - .when(mt) - .openMergedRegion((Server) Mockito.anyObject(), - (RegionServerServices) Mockito.anyObject(), - (HRegion) Mockito.anyObject()); - - // Run the execute. Look at what it returns. - boolean expectedException = false; - TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0); - CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager( - TEST_UTIL.getConfiguration()); - Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp); - try { - mt.execute(mockServer, null); - } catch (MockedFailedMergedRegionOpen e) { - expectedException = true; - } - assertTrue(expectedException); - // Run rollback returns false that we should restart. - assertFalse(mt.rollback(null, null)); - // Make sure that merged region is still in the filesystem, that - // they have not been removed; this is supposed to be the case if we go - // past point of no return. - Path tableDir = this.region_a.getRegionFileSystem().getRegionDir() - .getParent(); - Path mergedRegionDir = new Path(tableDir, mt.getMergedRegionInfo() - .getEncodedName()); - assertTrue(TEST_UTIL.getTestFileSystem().exists(mergedRegionDir)); - } - - @Test - public void testMergedRegionBoundary() { - TableName tableName = - TableName.valueOf("testMergedRegionBoundary"); - byte[] a = Bytes.toBytes("a"); - byte[] b = Bytes.toBytes("b"); - byte[] z = Bytes.toBytes("z"); - HRegionInfo r1 = new HRegionInfo(tableName); - HRegionInfo r2 = new HRegionInfo(tableName, a, z); - HRegionInfo m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); - assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r1.getEndKey())); - - r1 = new HRegionInfo(tableName, null, a); - r2 = new HRegionInfo(tableName, a, z); - m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); - assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r2.getEndKey())); - - r1 = new HRegionInfo(tableName, null, a); - r2 = new HRegionInfo(tableName, z, null); - m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); - assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r2.getEndKey())); - - r1 = new HRegionInfo(tableName, a, z); - r2 = new HRegionInfo(tableName, z, null); - m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); - assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r2.getEndKey())); - - r1 = new HRegionInfo(tableName, a, b); - r2 = new HRegionInfo(tableName, b, z); - m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); - assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r2.getEndKey())); - } - - /** - * Exception used in this class only. - */ - @SuppressWarnings("serial") - private class MockedFailedMergedRegionCreation extends IOException { - } - - @SuppressWarnings("serial") - private class MockedFailedMergedRegionOpen extends IOException { - } - - private HRegion createRegion(final Path testdir, final WALFactory wals, - final byte[] startrow, final byte[] endrow) - throws IOException { - // Make a region with start and end keys. 
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table")); - HColumnDescriptor hcd = new HColumnDescriptor(CF); - htd.addFamily(hcd); - HRegionInfo hri = new HRegionInfo(htd.getTableName(), startrow, endrow); - HRegion a = HBaseTestingUtility.createRegionAndWAL(hri, testdir, - TEST_UTIL.getConfiguration(), htd); - HBaseTestingUtility.closeRegionAndWAL(a); - return HRegion.openHRegion(testdir, hri, htd, - wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()), - TEST_UTIL.getConfiguration()); - } - - /** - * Load region with rows from 'aaa' to 'zzz', skip the rows which are out of - * range of the region - * @param r Region - * @param f Family - * @param flush flush the cache if true - * @return Count of rows loaded. - * @throws IOException - */ - private int loadRegion(final HRegion r, final byte[] f, final boolean flush) - throws IOException { - byte[] k = new byte[3]; - int rowCount = 0; - for (byte b1 = 'a'; b1 <= 'z'; b1++) { - for (byte b2 = 'a'; b2 <= 'z'; b2++) { - for (byte b3 = 'a'; b3 <= 'z'; b3++) { - k[0] = b1; - k[1] = b2; - k[2] = b3; - if (!HRegion.rowIsInRange(r.getRegionInfo(), k)) { - continue; - } - Put put = new Put(k); - put.addColumn(f, null, k); - if (r.getWAL() == null) - put.setDurability(Durability.SKIP_WAL); - r.put(put); - rowCount++; - } - } - if (flush) { - r.flush(true); - } - } - return rowCount; - } - -} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java deleted file mode 100644 index 661af14..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.util; - -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Tests merging a normal table's regions - */ -@Category({MiscTests.class, MediumTests.class}) -public class TestMergeTable { - private static final Log LOG = LogFactory.getLog(TestMergeTable.class); - private final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static final byte [] COLUMN_NAME = Bytes.toBytes("contents"); - private static final byte [] VALUE; - static { - // We will use the same value for the rows as that is not really important here - String partialValue = String.valueOf(System.currentTimeMillis()); - StringBuilder val = new StringBuilder(); - while (val.length() < 1024) { - val.append(partialValue); - } - VALUE = Bytes.toBytes(val.toString()); - } - - /** - * Test merge. - * Hand-makes regions of a mergeable size and adds the hand-made regions to - * hand-made meta. The hand-made regions are created offline. We then start - * up mini cluster, disables the hand-made table and starts in on merging. - * @throws Exception - */ - @Test (timeout=300000) public void testMergeTable() throws Exception { - // Table we are manually creating offline. - HTableDescriptor desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(Bytes.toBytes("test"))); - desc.addFamily(new HColumnDescriptor(COLUMN_NAME)); - - // Set maximum regionsize down. - UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L); - // Make it so we don't split. - UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0); - // Startup hdfs. Its in here we'll be putting our manually made regions. - UTIL.startMiniDFSCluster(1); - // Create hdfs hbase rootdir. - Path rootdir = UTIL.createRootDir(); - FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - if (fs.exists(rootdir)) { - if (fs.delete(rootdir, true)) { - LOG.info("Cleaned up existing " + rootdir); - } - } - - // Now create three data regions: The first is too large to merge since it - // will be > 64 MB in size. The second two will be smaller and will be - // selected for merging. - - // To ensure that the first region is larger than 64MB we need to write at - // least 65536 rows. We will make certain by writing 70000 - byte [] row_70001 = Bytes.toBytes("row_70001"); - byte [] row_80001 = Bytes.toBytes("row_80001"); - - // Create regions and populate them at same time. Create the tabledir - // for them first. 
- new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc); - HRegion [] regions = { - createRegion(desc, null, row_70001, 1, 70000, rootdir), - createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir), - createRegion(desc, row_80001, null, 80001, 11000, rootdir) - }; - - // Now create the root and meta regions and insert the data regions - // created above into hbase:meta - setupMeta(rootdir, regions); - try { - LOG.info("Starting mini zk cluster"); - UTIL.startMiniZKCluster(); - LOG.info("Starting mini hbase cluster"); - UTIL.startMiniHBaseCluster(1, 1); - Configuration c = new Configuration(UTIL.getConfiguration()); - Connection connection = UTIL.getConnection(); - - List originalTableRegions = - MetaTableAccessor.getTableRegions(connection, desc.getTableName()); - LOG.info("originalTableRegions size=" + originalTableRegions.size() + - "; " + originalTableRegions); - Admin admin = connection.getAdmin(); - admin.disableTable(desc.getTableName()); - admin.close(); - HMerge.merge(c, FileSystem.get(c), desc.getTableName()); - List postMergeTableRegions = - MetaTableAccessor.getTableRegions(connection, desc.getTableName()); - LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() + - "; " + postMergeTableRegions); - assertTrue("originalTableRegions=" + originalTableRegions.size() + - ", postMergeTableRegions=" + postMergeTableRegions.size(), - postMergeTableRegions.size() < originalTableRegions.size()); - LOG.info("Done with merge"); - } finally { - UTIL.shutdownMiniCluster(); - LOG.info("After cluster shutdown"); - } - } - - private HRegion createRegion(final HTableDescriptor desc, - byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir) - throws IOException { - HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey); - HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootdir, UTIL.getConfiguration(), - desc); - LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString()); - for(int i = firstRow; i < firstRow + nrows; i++) { - Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i))); - put.setDurability(Durability.SKIP_WAL); - put.addColumn(COLUMN_NAME, null, VALUE); - region.put(put); - if (i % 10000 == 0) { - LOG.info("Flushing write #" + i); - region.flush(true); - } - } - HBaseTestingUtility.closeRegionAndWAL(region); - return region; - } - - protected void setupMeta(Path rootdir, final HRegion [] regions) - throws IOException { - HRegion meta = - HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, rootdir, - UTIL.getConfiguration(), UTIL.getMetaTableDescriptor()); - for (HRegion r: regions) { - HRegion.addRegionToMETA(meta, r); - } - HBaseTestingUtility.closeRegionAndWAL(meta); - } - -} - diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java deleted file mode 100644 index 1924c9e..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java +++ /dev/null @@ -1,301 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseTestCase; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.util.ToolRunner; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** Test stand alone merge tool that can merge arbitrary regions */ -@Category({MiscTests.class, LargeTests.class}) -public class TestMergeTool extends HBaseTestCase { - private static final Log LOG = LogFactory.getLog(TestMergeTool.class); - HBaseTestingUtility TEST_UTIL; -// static final byte [] COLUMN_NAME = Bytes.toBytes("contents:"); - static final byte [] FAMILY = Bytes.toBytes("contents"); - static final byte [] QUALIFIER = Bytes.toBytes("dc"); - - private final HRegionInfo[] sourceRegions = new HRegionInfo[5]; - private final HRegion[] regions = new HRegion[5]; - private HTableDescriptor desc; - private byte [][][] rows; - private MiniDFSCluster dfsCluster = null; - private WALFactory wals; - - @Override - @Before - public void setUp() throws Exception { - // Set the timeout down else this test will take a while to complete. - this.conf.setLong("hbase.zookeeper.recoverable.waittime", 10); - // Make it so we try and connect to a zk that is not there (else we might - // find a zk ensemble put up by another concurrent test and this will - // mess up this test. Choose unlikely port. Default test port is 21818. - // Default zk port is 2181. - this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 10001); - - this.conf.set("hbase.hstore.compactionThreshold", "2"); - - // Create table description - this.desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf("TestMergeTool")); - this.desc.addFamily(new HColumnDescriptor(FAMILY)); - - /* - * Create the HRegionInfos for the regions. 
-     */
-    // Region 0 will contain the key range [row_0200,row_0300)
-    sourceRegions[0] = new HRegionInfo(this.desc.getTableName(),
-      Bytes.toBytes("row_0200"),
-      Bytes.toBytes("row_0300"));
-
-    // Region 1 will contain the key range [row_0250,row_0400) and overlaps
-    // with Region 0
-    sourceRegions[1] =
-      new HRegionInfo(this.desc.getTableName(),
-        Bytes.toBytes("row_0250"),
-        Bytes.toBytes("row_0400"));
-
-    // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
-    // to Region 0 or the region resulting from the merge of Regions 0 and 1
-    sourceRegions[2] =
-      new HRegionInfo(this.desc.getTableName(),
-        Bytes.toBytes("row_0100"),
-        Bytes.toBytes("row_0200"));
-
-    // Region 3 will contain the key range [row_0500,row_0600) and is not
-    // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
-    // of those regions
-    sourceRegions[3] =
-      new HRegionInfo(this.desc.getTableName(),
-        Bytes.toBytes("row_0500"),
-        Bytes.toBytes("row_0600"));
-
-    // Region 4 will have empty start and end keys and overlaps all regions.
-    sourceRegions[4] =
-      new HRegionInfo(this.desc.getTableName(),
-        HConstants.EMPTY_BYTE_ARRAY,
-        HConstants.EMPTY_BYTE_ARRAY);
-
-    /*
-     * Now create some row keys
-     */
-    this.rows = new byte [5][][];
-    this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" });
-    this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350",
-      "row_035" });
-    this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175",
-      "row_0175", "row_0175"});
-    this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560",
-      "row_0560", "row_0560", "row_0560"});
-    this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000",
-      "row_1000", "row_1000", "row_1000", "row_1000" });
-
-    // Start up dfs
-    TEST_UTIL = new HBaseTestingUtility(conf);
-    this.dfsCluster = TEST_UTIL.startMiniDFSCluster(2);
-    this.fs = this.dfsCluster.getFileSystem();
-    System.out.println("fs=" + this.fs);
-    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
-    TEST_UTIL.createRootDir();
-
-    // Note: we must call super.setUp after starting the mini cluster or
-    // we will end up with a local file system
-
-    super.setUp();
-    wals = new WALFactory(conf, null, "TestMergeTool");
-    try {
-      // Create meta region
-      createMetaRegion();
-      new FSTableDescriptors(this.conf, this.fs, testDir).createTableDescriptor(this.desc);
-      /*
-       * Create the regions we will merge
-       */
-      for (int i = 0; i < sourceRegions.length; i++) {
-        regions[i] =
-          HBaseTestingUtility.createRegionAndWAL(this.sourceRegions[i], testDir, this.conf,
-            this.desc);
-        /*
-         * Insert data
-         */
-        for (int j = 0; j < rows[i].length; j++) {
-          byte [] row = rows[i][j];
-          Put put = new Put(row);
-          put.addColumn(FAMILY, QUALIFIER, row);
-          regions[i].put(put);
-        }
-        HRegion.addRegionToMETA(meta, regions[i]);
-      }
-      // Close root and meta regions
-      closeRootAndMeta();
-
-    } catch (Exception e) {
-      TEST_UTIL.shutdownMiniCluster();
-      throw e;
-    }
-  }
-
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-    for (int i = 0; i < sourceRegions.length; i++) {
-      HRegion r = regions[i];
-      if (r != null) {
-        HBaseTestingUtility.closeRegionAndWAL(r);
-      }
-    }
-    wals.close();
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  /*
-   * @param msg Message that describes this merge
-   * @param regionName1
-   * @param regionName2
-   * @param log Log to use merging.
-   * @param upperbound Verifying, how high up in this.rows to go.
-   * @return Merged region.
-   * @throws Exception
-   */
-  private HRegion mergeAndVerify(final String msg, final String regionName1,
-      final String regionName2, final WAL log, final int upperbound)
-  throws Exception {
-    Merge merger = new Merge();
-    LOG.info(msg);
-    LOG.info("fs2=" + this.conf.get("fs.defaultFS"));
-    int errCode = ToolRunner.run(this.conf, merger,
-      new String[] {this.desc.getTableName().getNameAsString(), regionName1, regionName2}
-    );
-    assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
-    HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
-
-    // Now verify that we can read all the rows from regions 0, 1
-    // in the new merged region.
-    HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
-    verifyMerge(merged, upperbound);
-    merged.close();
-    LOG.info("Verified " + msg);
-    return merged;
-  }
-
-  private void verifyMerge(final HRegion merged, final int upperbound)
-  throws IOException {
-    //Test
-    Scan scan = new Scan();
-    scan.addFamily(FAMILY);
-    InternalScanner scanner = merged.getScanner(scan);
-    try {
-      List<Cell> testRes = null;
-      while (true) {
-        testRes = new ArrayList<>();
-        boolean hasNext = scanner.next(testRes);
-        if (!hasNext) {
-          break;
-        }
-      }
-    } finally {
-      scanner.close();
-    }
-
-    //!Test
-
-    for (int i = 0; i < upperbound; i++) {
-      for (int j = 0; j < rows[i].length; j++) {
-        Get get = new Get(rows[i][j]);
-        get.addFamily(FAMILY);
-        Result result = merged.get(get);
-        assertEquals(1, result.size());
-        byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
-        assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
-        assertTrue(Bytes.equals(bytes, rows[i][j]));
-      }
-    }
-  }
-
-  /**
-   * Test merge tool.
-   * @throws Exception
-   */
-  @Test
-  public void testMergeTool() throws Exception {
-    // First verify we can read the rows from the source regions and that they
-    // contain the right data.
-    for (int i = 0; i < regions.length; i++) {
-      for (int j = 0; j < rows[i].length; j++) {
-        Get get = new Get(rows[i][j]);
-        get.addFamily(FAMILY);
-        Result result = regions[i].get(get);
-        byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
-        assertNotNull(bytes);
-        assertTrue(Bytes.equals(bytes, rows[i][j]));
-      }
-      // Close the region and delete the log
-      HBaseTestingUtility.closeRegionAndWAL(regions[i]);
-    }
-    WAL log = wals.getWAL(new byte[]{}, null);
-    // Merge Region 0 and Region 1
-    HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
-      this.sourceRegions[0].getRegionNameAsString(),
-      this.sourceRegions[1].getRegionNameAsString(), log, 2);
-
-    // Merge the result of merging regions 0 and 1 with region 2
-    merged = mergeAndVerify("merging regions 0+1 and 2",
-      merged.getRegionInfo().getRegionNameAsString(),
-      this.sourceRegions[2].getRegionNameAsString(), log, 3);
-
-    // Merge the result of merging regions 0, 1 and 2 with region 3
-    merged = mergeAndVerify("merging regions 0+1+2 and 3",
-      merged.getRegionInfo().getRegionNameAsString(),
-      this.sourceRegions[3].getRegionNameAsString(), log, 4);
-
-    // Merge the result of merging regions 0, 1, 2 and 3 with region 4
-    merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
-      merged.getRegionInfo().getRegionNameAsString(),
-      this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
-  }
-
-}
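
Note: with the standalone HMerge/Merge tools and their tests removed, region merges are driven from the client through the master's merge procedure. Below is a minimal sketch of that path, assuming the HBase 2.0 Admin interface (mergeRegionsAsync); the table name "t1" and the choice of the first two regions are illustrative only, not part of this patch.

import java.util.List;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MergeViaAdmin {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      // Hypothetical table "t1"; list its regions and merge the first two.
      List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf("t1"));
      // forcible=false: only regions adjacent in the key space may merge.
      admin.mergeRegionsAsync(
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes(),
          false).get(); // block until the master-side merge procedure completes
    }
  }
}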