diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 52b935f..5b53a7e 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -907,6 +907,7 @@ public interface Admin extends Abortable, Closeable {
   void mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB,
       final boolean forcible) throws IOException;
+
   /**
    * Merge two regions. Asynchronous operation.
    *
    *
@@ -922,6 +923,18 @@ public interface Admin extends Abortable, Closeable {
       final boolean forcible) throws IOException;
 
   /**
+   * Merge regions. Asynchronous operation.
+   *
+   * @param nameofRegionsToMerge encoded or full names of the regions to merge
+   * @param forcible true if a compulsory merge should be performed, otherwise
+   *          only adjacent regions will be merged
+   * @throws IOException
+   */
+  Future<Void> mergeRegionsAsync(
+    final byte[][] nameofRegionsToMerge,
+    final boolean forcible) throws IOException;
+
+  /**
    * Split a table. Asynchronous operation.
    *
    * @param tableName table to split
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index aa984b1..e75d9a5 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1274,6 +1274,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
 
     @Override
+    public MasterProtos.MergeTableRegionsResponse mergeTableRegions(
+        RpcController controller, MasterProtos.MergeTableRegionsRequest request)
+        throws ServiceException {
+      return stub.mergeTableRegions(controller, request);
+    }
+
+    @Override
     public MasterProtos.AssignRegionResponse assignRegion(RpcController controller,
         MasterProtos.AssignRegionRequest request) throws ServiceException {
       return stub.assignRegion(controller, request);
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 00463f38..9bfe276 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -119,8 +119,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTabl
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
@@ -147,6 +145,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableD
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
@@ -1513,68 +1513,80 @@ public class HBaseAdmin implements Admin {
       final byte[] nameOfRegionA, final byte[] nameOfRegionB,
       final boolean forcible) throws IOException {
+    byte[][] nameofRegionsToMerge = new byte[2][];
+    nameofRegionsToMerge[0] = nameOfRegionA;
+    nameofRegionsToMerge[1] = nameOfRegionB;
+    return mergeRegionsAsync(nameofRegionsToMerge, forcible);
+  }
 
-    final byte[] encodedNameOfRegionA = isEncodedRegionName(nameOfRegionA) ?
-      nameOfRegionA : HRegionInfo.encodeRegionName(nameOfRegionA).getBytes();
-    final byte[] encodedNameOfRegionB = isEncodedRegionName(nameOfRegionB) ?
-      nameOfRegionB : HRegionInfo.encodeRegionName(nameOfRegionB).getBytes();
-
-    TableName tableName;
-    Pair<HRegionInfo, ServerName> pair = getRegion(nameOfRegionA);
-
-    if (pair != null) {
-      if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly");
-      }
-      tableName = pair.getFirst().getTable();
-    } else {
-      throw new UnknownRegionException (
-        "Can't invoke merge on unknown region " + Bytes.toStringBinary(encodedNameOfRegionA));
+  /**
+   * Merge regions. Asynchronous operation.
+   * @param nameofRegionsToMerge encoded or full names of the regions to merge
+   * @param forcible true if a compulsory merge should be performed, otherwise
+   *          only adjacent regions will be merged
+   * @throws IOException
+   */
+  @Override
+  public Future<Void> mergeRegionsAsync(
+    final byte[][] nameofRegionsToMerge,
+    final boolean forcible) throws IOException {
+    assert(nameofRegionsToMerge.length >= 2);
+    byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][];
+    for (int i = 0; i < nameofRegionsToMerge.length; i++) {
+      encodedNameofRegionsToMerge[i] = isEncodedRegionName(nameofRegionsToMerge[i]) ?
+        nameofRegionsToMerge[i] : HRegionInfo.encodeRegionName(nameofRegionsToMerge[i]).getBytes();
     }
 
-    pair = getRegion(nameOfRegionB);
-    if (pair != null) {
-      if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly");
-      }
+    TableName tableName = null;
+    Pair<HRegionInfo, ServerName> pair;
 
-      if (!tableName.equals(pair.getFirst().getTable())) {
-        throw new IllegalArgumentException ("Cannot merge regions from two different tables " +
-          tableName + " and " + pair.getFirst().getTable());
+    for (int i = 0; i < nameofRegionsToMerge.length; i++) {
+      pair = getRegion(nameofRegionsToMerge[i]);
+
+      if (pair != null) {
+        if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+          throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly");
+        }
+        if (tableName == null) {
+          tableName = pair.getFirst().getTable();
+        } else if (!tableName.equals(pair.getFirst().getTable())) {
+          throw new IllegalArgumentException ("Cannot merge regions from two different tables " +
+            tableName + " and " + pair.getFirst().getTable());
+        }
+      } else {
+        throw new UnknownRegionException (
+          "Can't invoke merge on unknown region " +
+          Bytes.toStringBinary(encodedNameofRegionsToMerge[i]));
       }
-    } else {
-      throw new UnknownRegionException (
-        "Can't invoke merge on unknown region " + Bytes.toStringBinary(encodedNameOfRegionB));
     }
 
-    DispatchMergingRegionsResponse response =
-      executeCallable(new MasterCallable<DispatchMergingRegionsResponse>(getConnection(),
+    MergeTableRegionsResponse response =
+      executeCallable(new MasterCallable<MergeTableRegionsResponse>(getConnection(),
         getRpcControllerFactory()) {
       @Override
-      protected DispatchMergingRegionsResponse rpcCall() throws Exception {
-        DispatchMergingRegionsRequest request = RequestConverter
-          .buildDispatchMergingRegionsRequest(
-            encodedNameOfRegionA,
-            encodedNameOfRegionB,
+      protected MergeTableRegionsResponse rpcCall() throws Exception {
+        MergeTableRegionsRequest request = RequestConverter
+          .buildMergeTableRegionsRequest(
+            encodedNameofRegionsToMerge,
             forcible,
             ng.getNonceGroup(),
             ng.newNonce());
-        return master.dispatchMergingRegions(getRpcController(), request);
+        return master.mergeTableRegions(getRpcController(), request);
       }
     });
-    return new DispatchMergingRegionsFuture(this, tableName, response);
+    return new MergeTableRegionsFuture(this, tableName, response);
   }
 
-  private static class DispatchMergingRegionsFuture extends TableFuture<Void> {
-    public DispatchMergingRegionsFuture(
+  private static class MergeTableRegionsFuture extends TableFuture<Void> {
+    public MergeTableRegionsFuture(
       final HBaseAdmin admin,
       final TableName tableName,
-      final DispatchMergingRegionsResponse response) {
+      final MergeTableRegionsResponse response) {
       super(admin, tableName,
         (response != null && response.hasProcId()) ? response.getProcId() : null);
     }
 
-    public DispatchMergingRegionsFuture(
+    public MergeTableRegionsFuture(
       final HBaseAdmin admin,
       final TableName tableName,
       final Long procId) {
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 5876fae..c6d647e 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -105,8 +105,8 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -1756,26 +1756,26 @@ public final class ProtobufUtil {
   }
 
   /**
-   * A helper to close a region for split
+   * A helper to close a region for split or merge
    * using admin protocol.
    *
    * @param controller RPC controller
    * @param admin Admin service
    * @param server the RS that hosts the target region
-   * @param parentRegionInfo the target region info
+   * @param regionInfo the target region info
    * @return true if the region is closed
    * @throws IOException
    */
-  public static boolean closeRegionForSplit(
+  public static boolean closeRegionForSplitOrMerge(
    final RpcController controller,
    final AdminService.BlockingInterface admin,
    final ServerName server,
-   final HRegionInfo parentRegionInfo) throws IOException {
-    CloseRegionForSplitRequest closeRegionForSplitRequest =
-      ProtobufUtil.buildCloseRegionForSplitRequest(server, parentRegionInfo);
+   final HRegionInfo... regionInfo) throws IOException {
+    CloseRegionForSplitOrMergeRequest closeRegionForRequest =
+      ProtobufUtil.buildCloseRegionForSplitOrMergeRequest(server, regionInfo);
     try {
-      CloseRegionForSplitResponse response =
-        admin.closeRegionForSplit(controller, closeRegionForSplitRequest);
+      CloseRegionForSplitOrMergeResponse response =
+        admin.closeRegionForSplitOrMerge(controller, closeRegionForRequest);
       return ResponseConverter.isClosed(response);
     } catch (ServiceException se) {
       throw getRemoteException(se);
@@ -3130,19 +3130,22 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Create a CloseRegionForSplitRequest for a given region
+   * Create a CloseRegionForSplitOrMergeRequest for given regions
    *
    * @param server the RS server that hosts the region
-   * @param parentRegionInfo the info of the region to close
-   * @return a CloseRegionForSplitRequest
+   * @param regionsToClose the info of the regions to close
+   * @return a CloseRegionForSplitOrMergeRequest
    */
-  public static CloseRegionForSplitRequest buildCloseRegionForSplitRequest(
+  public static CloseRegionForSplitOrMergeRequest buildCloseRegionForSplitOrMergeRequest(
    final ServerName server,
-   final HRegionInfo parentRegionInfo) {
-    CloseRegionForSplitRequest.Builder builder = CloseRegionForSplitRequest.newBuilder();
-    RegionSpecifier parentRegion = RequestConverter.buildRegionSpecifier(
-      RegionSpecifierType.REGION_NAME, parentRegionInfo.getRegionName());
-    builder.setRegion(parentRegion);
+   final HRegionInfo... regionsToClose) {
+    CloseRegionForSplitOrMergeRequest.Builder builder =
+      CloseRegionForSplitOrMergeRequest.newBuilder();
+    for (int i = 0; i < regionsToClose.length; i++) {
+      RegionSpecifier regionToClose = RequestConverter.buildRegionSpecifier(
+        RegionSpecifierType.REGION_NAME, regionsToClose[i].getRegionName());
+      builder.addRegion(regionToClose);
+    }
     return builder.build();
   }
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 306c237..f938fd0 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -96,6 +96,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJ
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
@@ -1096,6 +1097,22 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  public static MergeTableRegionsRequest buildMergeTableRegionsRequest(
+      final byte[][] encodedNameOfRegionsToMerge,
+      final boolean forcible,
+      final long nonceGroup,
+      final long nonce) throws DeserializationException {
+    MergeTableRegionsRequest.Builder builder = MergeTableRegionsRequest.newBuilder();
+    for (int i = 0; i < encodedNameOfRegionsToMerge.length; i++) {
+      builder.addRegion(buildRegionSpecifier(
+        RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfRegionsToMerge[i]));
+    }
+    builder.setForcible(forcible);
+    builder.setNonceGroup(nonceGroup);
+    builder.setNonce(nonce);
+    return builder.build();
+  }
+
   public static SplitTableRegionRequest buildSplitTableRegionRequest(
     final HRegionInfo regionInfo,
     final byte[] splitPoint,
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 11fc931..760f630 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.SingleResponse;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
@@ -259,7 +259,7 @@ public final class ResponseConverter {
    * @return the region close state
    */
   public static boolean isClosed
-      (final CloseRegionForSplitResponse proto) {
+      (final CloseRegionForSplitOrMergeResponse proto) {
     if (proto == null || !proto.hasClosed()) return false;
     return proto.getClosed();
   }
diff --git hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index 8c9cea2..93f3460 100644
--- hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -149,7 +149,8 @@ public class ProcedureTestingUtility {
     assertSingleExecutorForKillTests(procExecutor);
   }
 
-  private static void assertSingleExecutorForKillTests(final ProcedureExecutor<?> procExecutor) {
+  private static void assertSingleExecutorForKillTests(
+      final ProcedureExecutor<?> procExecutor) {
     if (procExecutor.testing == null) return;
     if (procExecutor.testing.killBeforeStoreUpdate ||
         procExecutor.testing.toggleKillBeforeStoreUpdate) {
diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 6f20fcc..576c739 100644
--- hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -9414,41 +9414,53 @@ public final class AdminProtos {
   }
 
-  public interface CloseRegionForSplitRequestOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitRequest)
+  public interface CloseRegionForSplitOrMergeRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitOrMergeRequest)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
     /**
-     * required .hbase.pb.RegionSpecifier region = 1;
+ * repeated .hbase.pb.RegionSpecifier region = 1; */ - boolean hasRegion(); + java.util.List + getRegionList(); /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index); /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + int getRegionCount(); + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + java.util.List + getRegionOrBuilderList(); + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index); } /** *
    **
-   * Closes the specified region and create
-   * child region.
+   * Closes the specified region(s) for
+   * split or merge
    * 
* - * Protobuf type {@code hbase.pb.CloseRegionForSplitRequest} + * Protobuf type {@code hbase.pb.CloseRegionForSplitOrMergeRequest} */ - public static final class CloseRegionForSplitRequest extends + public static final class CloseRegionForSplitOrMergeRequest extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitRequest) - CloseRegionForSplitRequestOrBuilder { - // Use CloseRegionForSplitRequest.newBuilder() to construct. - private CloseRegionForSplitRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitOrMergeRequest) + CloseRegionForSplitOrMergeRequestOrBuilder { + // Use CloseRegionForSplitOrMergeRequest.newBuilder() to construct. + private CloseRegionForSplitOrMergeRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private CloseRegionForSplitRequest() { + private CloseRegionForSplitOrMergeRequest() { + region_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -9456,7 +9468,7 @@ public final class AdminProtos { getUnknownFields() { return this.unknownFields; } - private CloseRegionForSplitRequest( + private CloseRegionForSplitOrMergeRequest( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { @@ -9480,16 +9492,12 @@ public final class AdminProtos { break; } case 10: { - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = region_.toBuilder(); - } - region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(region_); - region_ = subBuilder.buildPartial(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } - bitField0_ |= 0x00000001; + region_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry)); break; } } @@ -9500,42 +9508,58 @@ public final class AdminProtos { throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = java.util.Collections.unmodifiableList(region_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.Builder.class); } - private int bitField0_; public static final int REGION_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier region_; + private java.util.List region_; /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public boolean hasRegion() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getRegionList() { + return region_; } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { - return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + public java.util.List + getRegionOrBuilderList() { + return region_; } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { - return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + public int getRegionCount() { + return region_.size(); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + return region_.get(index); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + return region_.get(index); } private byte memoizedIsInitialized = -1; @@ -9544,13 +9568,11 @@ public final class AdminProtos { if (isInitialized == 1) return true; if (isInitialized == 0) return false; - if (!hasRegion()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegion().isInitialized()) { - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -9558,8 +9580,8 @@ public final class AdminProtos { public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, getRegion()); + for (int i = 0; i < region_.size(); i++) { + output.writeMessage(1, region_.get(i)); } unknownFields.writeTo(output); } @@ -9569,9 +9591,9 @@ public final class AdminProtos { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + for (int i = 0; i < region_.size(); i++) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getRegion()); + 
.computeMessageSize(1, region_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -9584,17 +9606,14 @@ public final class AdminProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest) obj; + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest) obj; boolean result = true; - result = result && (hasRegion() == other.hasRegion()); - if (hasRegion()) { - result = result && getRegion() - .equals(other.getRegion()); - } + result = result && getRegionList() + .equals(other.getRegionList()); result = result && unknownFields.equals(other.unknownFields); return result; } @@ -9606,67 +9625,67 @@ public final class AdminProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegion()) { + if (getRegionCount() > 0) { hash = (37 * hash) + REGION_FIELD_NUMBER; - hash = (53 * hash) + getRegion().hashCode(); + hash = (53 * hash) + getRegionList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom(byte[] data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom( byte[] data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest 
parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -9678,7 +9697,7 @@ public final class AdminProtos { public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { @@ -9695,29 +9714,29 @@ public final class AdminProtos { /** *
      **
-     * Closes the specified region and create
-     * child region.
+     * Closes the specified region(s) for
+     * split or merge
      * 
* - * Protobuf type {@code hbase.pb.CloseRegionForSplitRequest} + * Protobuf type {@code hbase.pb.CloseRegionForSplitOrMergeRequest} */ public static final class Builder extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:hbase.pb.CloseRegionForSplitRequest) - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequestOrBuilder { + // @@protoc_insertion_point(builder_implements:hbase.pb.CloseRegionForSplitOrMergeRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequestOrBuilder { public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -9736,44 +9755,43 @@ public final class AdminProtos { public Builder clear() { super.clear(); if (regionBuilder_ == null) { - region_ = null; + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); return this; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_descriptor; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.getDefaultInstance(); } - public 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest build() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest result = buildPartial(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest buildPartial() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest(this); + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = java.util.Collections.unmodifiableList(region_); + bitField0_ = (bitField0_ & ~0x00000001); + } result.region_ = region_; } else { result.region_ = regionBuilder_.build(); } - result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -9805,18 +9823,41 @@ public final class AdminProtos { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest) { - return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)other); + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest other) { - if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance()) return this; - if (other.hasRegion()) { - mergeRegion(other.getRegion()); + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.getDefaultInstance()) return this; + if (regionBuilder_ == null) { + if (!other.region_.isEmpty()) { + if (region_.isEmpty()) { + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRegionIsMutable(); + region_.addAll(other.region_); + } + onChanged(); + } + } else { + if (!other.region_.isEmpty()) { + if (regionBuilder_.isEmpty()) { + regionBuilder_.dispose(); + regionBuilder_ = null; + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + regionBuilder_ = + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getRegionFieldBuilder() : null; + } else { + regionBuilder_.addAllMessages(other.region_); + } + } } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -9824,11 +9865,10 @@ public final class AdminProtos { } public final boolean isInitialized() { - if (!hasRegion()) { - return false; - } - if (!getRegion().isInitialized()) { - return false; + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + return false; + } } return true; } @@ -9837,11 +9877,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parsedMessage = null; + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -9852,117 +9892,239 @@ public final class AdminProtos { } private int bitField0_; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier region_ = null; - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + private java.util.List region_ = + java.util.Collections.emptyList(); + private void ensureRegionIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(region_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public boolean hasRegion() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getRegionList() { + if (regionBuilder_ == null) { + return java.util.Collections.unmodifiableList(region_); + } else { + return regionBuilder_.getMessageList(); + } } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + public int getRegionCount() { if (regionBuilder_ == null) { - return region_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + return region_.size(); } else { - return regionBuilder_.getMessage(); + return regionBuilder_.getCount(); } } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + if (regionBuilder_ == null) { + return region_.get(index); + } else { + return regionBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { if (regionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - region_ = value; + ensureRegionIsMutable(); + region_.set(index, value); onChanged(); } else { - regionBuilder_.setMessage(value); + regionBuilder_.setMessage(index, value); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ public Builder setRegion( - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { if (regionBuilder_ == null) { - region_ = builderForValue.build(); + ensureRegionIsMutable(); + region_.set(index, builderForValue.build()); onChanged(); } else { - regionBuilder_.setMessage(builderForValue.build()); + regionBuilder_.setMessage(index, builderForValue.build()); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + public Builder addRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { if (regionBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - region_ != null && - region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { - region_ = - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); - } else { - region_ = value; + if (value == null) { + throw new NullPointerException(); } + ensureRegionIsMutable(); + region_.add(value); onChanged(); } else { - regionBuilder_.mergeFrom(value); + regionBuilder_.addMessage(value); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.add(index, value); + onChanged(); + } else { + regionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + 
ensureRegionIsMutable(); + region_.add(builderForValue.build()); + onChanged(); + } else { + regionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.add(index, builderForValue.build()); + onChanged(); + } else { + regionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addAllRegion( + java.lang.Iterable values) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, region_); + onChanged(); + } else { + regionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; */ public Builder clearRegion() { if (regionBuilder_ == null) { - region_ = null; + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegionFieldBuilder().getBuilder(); + public Builder removeRegion(int index) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.remove(index); + onChanged(); + } else { + regionBuilder_.remove(index); + } + return this; } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder( + int index) { + return getRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + if (regionBuilder_ == null) { + return region_.get(index); } else { + return regionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public java.util.List + getRegionOrBuilderList() { if (regionBuilder_ != null) { - return regionBuilder_.getMessageOrBuilder(); + return regionBuilder_.getMessageOrBuilderList(); } else { - return region_ == null ? 
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + return java.util.Collections.unmodifiableList(region_); } } /** - * required .hbase.pb.RegionSpecifier region = 1; + * repeated .hbase.pb.RegionSpecifier region = 1; */ - private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder() { + return getRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder( + int index) { + return getRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public java.util.List + getRegionBuilderList() { + return getRegionFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> getRegionFieldBuilder() { if (regionBuilder_ == null) { - regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( - getRegion(), + region_, + ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); region_ = null; @@ -9980,46 +10142,46 @@ public final class AdminProtos { } - // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitOrMergeRequest) } - // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitRequest) - private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitOrMergeRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest(); + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest(); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest getDefaultInstance() { + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser - PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { - public CloseRegionForSplitRequest 
parsePartialFrom( + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public CloseRegionForSplitOrMergeRequest parsePartialFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return new CloseRegionForSplitRequest(input, extensionRegistry); + return new CloseRegionForSplitOrMergeRequest(input, extensionRegistry); } }; - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { return PARSER; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest getDefaultInstanceForType() { + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface CloseRegionForSplitResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitResponse) + public interface CloseRegionForSplitOrMergeResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitOrMergeResponse) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { /** @@ -10032,17 +10194,17 @@ public final class AdminProtos { boolean getClosed(); } /** - * Protobuf type {@code hbase.pb.CloseRegionForSplitResponse} + * Protobuf type {@code hbase.pb.CloseRegionForSplitOrMergeResponse} */ - public static final class CloseRegionForSplitResponse extends + public static final class CloseRegionForSplitOrMergeResponse extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitResponse) - CloseRegionForSplitResponseOrBuilder { - // Use CloseRegionForSplitResponse.newBuilder() to construct. - private CloseRegionForSplitResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitOrMergeResponse) + CloseRegionForSplitOrMergeResponseOrBuilder { + // Use CloseRegionForSplitOrMergeResponse.newBuilder() to construct. 
+ private CloseRegionForSplitOrMergeResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private CloseRegionForSplitResponse() { + private CloseRegionForSplitOrMergeResponse() { closed_ = false; } @@ -10051,7 +10213,7 @@ public final class AdminProtos { getUnknownFields() { return this.unknownFields; } - private CloseRegionForSplitResponse( + private CloseRegionForSplitOrMergeResponse( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { @@ -10093,14 +10255,14 @@ public final class AdminProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.Builder.class); } private int bitField0_; @@ -10161,10 +10323,10 @@ public final class AdminProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) obj; + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse) obj; boolean result = true; result = result && (hasClosed() == other.hasClosed()); @@ -10193,58 +10355,58 @@ public final class AdminProtos { return hash; } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + 
public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom(byte[] data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom( byte[] data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom( 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -10256,7 +10418,7 @@ public final class AdminProtos { public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { @@ -10271,25 +10433,25 @@ public final class AdminProtos { return builder; } /** - * Protobuf type {@code hbase.pb.CloseRegionForSplitResponse} + * Protobuf type {@code hbase.pb.CloseRegionForSplitOrMergeResponse} */ public static final class Builder extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:hbase.pb.CloseRegionForSplitResponse) - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponseOrBuilder { + // @@protoc_insertion_point(builder_implements:hbase.pb.CloseRegionForSplitOrMergeResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponseOrBuilder { public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -10313,23 +10475,23 @@ public final class AdminProtos { 
public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_descriptor; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse build() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse result = buildPartial(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse buildPartial() { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse(this); + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -10368,16 +10530,16 @@ public final class AdminProtos { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) { - return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse)other); + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse other) { - if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.getDefaultInstance()) return this; if (other.hasClosed()) { 
setClosed(other.getClosed()); } @@ -10397,11 +10559,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parsedMessage = null; + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -10454,39 +10616,39 @@ public final class AdminProtos { } - // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitOrMergeResponse) } - // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitResponse) - private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitOrMergeResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse(); + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse(); } - public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse getDefaultInstance() { + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser - PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { - public CloseRegionForSplitResponse parsePartialFrom( + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public CloseRegionForSplitOrMergeResponse parsePartialFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { - return new CloseRegionForSplitResponse(input, extensionRegistry); + return new CloseRegionForSplitOrMergeResponse(input, extensionRegistry); } }; - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { return PARSER; } - 
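The Builder.mergeFrom hunk above keeps protobuf's usual merge contract: a field that is set on the other message overwrites the builder's current value, while unset fields are left alone. A hedged illustration, not HBase code, assuming the same shaded import as the sketch above:

CloseRegionForSplitOrMergeResponse closedFalse =
    CloseRegionForSplitOrMergeResponse.newBuilder().setClosed(false).build();
CloseRegionForSplitOrMergeResponse closedTrue =
    CloseRegionForSplitOrMergeResponse.newBuilder().setClosed(true).build();
// toBuilder() starts from closedFalse; mergeFrom then copies the set 'closed'
// bit from closedTrue -- exactly the if (other.hasClosed()) branch shown above.
boolean merged = closedFalse.toBuilder()
    .mergeFrom(closedTrue)
    .build()
    .getClosed(); // true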
public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse getDefaultInstanceForType() { + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse getDefaultInstanceForType() { return DEFAULT_INSTANCE; } @@ -25173,7 +25335,7 @@ public final class AdminProtos { * optional .hbase.pb.TableName table_name = 1; */ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> getTableNameFieldBuilder() { if (tableNameBuilder_ == null) { tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< @@ -25241,7 +25403,7 @@ public final class AdminProtos { /** * repeated .hbase.pb.RegionLoad region_loads = 1; */ - java.util.List + java.util.List getRegionLoadsList(); /** * repeated .hbase.pb.RegionLoad region_loads = 1; @@ -25254,7 +25416,7 @@ public final class AdminProtos { /** * repeated .hbase.pb.RegionLoad region_loads = 1; */ - java.util.List + java.util.List getRegionLoadsOrBuilderList(); /** * repeated .hbase.pb.RegionLoad region_loads = 1; @@ -25352,7 +25514,7 @@ public final class AdminProtos { /** * repeated .hbase.pb.RegionLoad region_loads = 1; */ - public java.util.List + public java.util.List getRegionLoadsOrBuilderList() { return regionLoads_; } @@ -25659,7 +25821,7 @@ public final class AdminProtos { regionLoadsBuilder_ = null; regionLoads_ = other.regionLoads_; bitField0_ = (bitField0_ & ~0x00000001); - regionLoadsBuilder_ = + regionLoadsBuilder_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getRegionLoadsFieldBuilder() : null; } else { @@ -25895,7 +26057,7 @@ public final class AdminProtos { /** * repeated .hbase.pb.RegionLoad region_loads = 1; */ - public java.util.List + public java.util.List getRegionLoadsOrBuilderList() { if (regionLoadsBuilder_ != null) { return regionLoadsBuilder_.getMessageOrBuilderList(); @@ -25921,12 +26083,12 @@ public final class AdminProtos { /** * repeated .hbase.pb.RegionLoad region_loads = 1; */ - public java.util.List + public java.util.List getRegionLoadsBuilderList() { return getRegionLoadsFieldBuilder().getBuilderList(); } private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> + org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> getRegionLoadsFieldBuilder() { if (regionLoadsBuilder_ == null) { regionLoadsBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< @@ -26045,12 +26207,12 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** - * rpc CloseRegionForSplit(.hbase.pb.CloseRegionForSplitRequest) returns (.hbase.pb.CloseRegionForSplitResponse); + * rpc CloseRegionForSplitOrMerge(.hbase.pb.CloseRegionForSplitOrMergeRequest) returns (.hbase.pb.CloseRegionForSplitOrMergeResponse); */ - public abstract void closeRegionForSplit( + public abstract void closeRegionForSplitOrMerge( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** * rpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse); @@ -26202,11 +26364,11 @@ public final class AdminProtos { } @java.lang.Override - public void closeRegionForSplit( + public void closeRegionForSplitOrMerge( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { - impl.closeRegionForSplit(controller, request, done); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.closeRegionForSplitOrMerge(controller, request, done); } @java.lang.Override @@ -26340,7 +26502,7 @@ public final class AdminProtos { case 5: return impl.closeRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest)request); case 6: - return impl.closeRegionForSplit(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)request); + return impl.closeRegionForSplitOrMerge(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest)request); case 7: return impl.flushRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest)request); case 8: @@ -26392,7 +26554,7 @@ public final class AdminProtos { case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.getDefaultInstance(); case 7: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: @@ -26444,7 +26606,7 @@ public final class AdminProtos { case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.getDefaultInstance(); case 7: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: @@ -26526,12 +26688,12 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** - * rpc CloseRegionForSplit(.hbase.pb.CloseRegionForSplitRequest) returns (.hbase.pb.CloseRegionForSplitResponse); + * rpc CloseRegionForSplitOrMerge(.hbase.pb.CloseRegionForSplitOrMergeRequest) returns (.hbase.pb.CloseRegionForSplitOrMergeResponse); */ - public abstract void closeRegionForSplit( + public abstract void closeRegionForSplitOrMerge( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** * rpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse); @@ -26682,8 +26844,8 @@ public final class AdminProtos { done)); return; case 6: - this.closeRegionForSplit(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + this.closeRegionForSplitOrMerge(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 7: @@ -26773,7 +26935,7 @@ public final class AdminProtos { case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest.getDefaultInstance(); case 7: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: @@ -26825,7 +26987,7 @@ public final class AdminProtos { case 5: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.getDefaultInstance(); case 7: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: @@ -26961,19 +27123,19 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance())); } - public void closeRegionForSplit( + public void closeRegionForSplitOrMerge( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(6), controller, request, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.getDefaultInstance(), org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( done, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.class, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance())); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.getDefaultInstance())); } public void flushRegion( @@ -27193,9 +27355,9 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse closeRegionForSplit( + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse closeRegionForSplitOrMerge( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse flushRegion( @@ -27338,15 +27500,15 @@ public final class AdminProtos { } - public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse closeRegionForSplit( + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse closeRegionForSplitOrMerge( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request) + 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) channel.callBlockingMethod( + return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(6), controller, request, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance()); + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse.getDefaultInstance()); } @@ -27564,15 +27726,15 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_CloseRegionResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable; + internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable; + internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_FlushRegionRequest_descriptor; private static final @@ -27690,12 +27852,12 @@ public final class AdminProtos { internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_GetRegionLoadRequest_descriptor; - private static final + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_GetRegionLoadResponse_descriptor; - private static final + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable; @@ -27742,94 +27904,95 @@ public final class AdminProtos { "de\030\002 \001(\r\022\036\n\020transition_in_ZK\030\003 \001(\010:\004true" + "\0220\n\022destination_server\030\004 \001(\0132\024.hbase.pb." 
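The stub hunks above dispatch the renamed RPC by method index 6 of the service descriptor (the long quoted blob that follows is the serialized FileDescriptorProto those descriptors are built from). A sketch of a caller on the blocking interface; 'channel' and 'regionSpecifier' are hypothetical placeholders, and real HBase callers go through the RPC client rather than raw stubs:

AdminProtos.AdminService.BlockingInterface admin =
    AdminProtos.AdminService.newBlockingStub(channel);
// The request's 'region' field is now repeated, so one request can carry
// every region being closed for the split or merge.
AdminProtos.CloseRegionForSplitOrMergeRequest request =
    AdminProtos.CloseRegionForSplitOrMergeRequest.newBuilder()
        .addRegion(regionSpecifier)
        .build();
// Null RpcController for brevity only.
boolean closed =
    admin.closeRegionForSplitOrMerge(null, request).getClosed();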
+ "ServerName\022\027\n\017serverStartCode\030\005 \001(\004\"%\n\023C" + - "loseRegionResponse\022\016\n\006closed\030\001 \002(\010\"G\n\032Cl" + - "oseRegionForSplitRequest\022)\n\006region\030\001 \002(\013" + - "2\031.hbase.pb.RegionSpecifier\"-\n\033CloseRegi" + - "onForSplitResponse\022\016\n\006closed\030\001 \002(\010\"y\n\022Fl" + - "ushRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase", - ".pb.RegionSpecifier\022\030\n\020if_older_than_ts\030" + - "\002 \001(\004\022\036\n\026write_flush_wal_marker\030\003 \001(\010\"_\n" + - "\023FlushRegionResponse\022\027\n\017last_flush_time\030" + - "\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026wrote_flush_wa" + - "l_marker\030\003 \001(\010\"T\n\022SplitRegionRequest\022)\n\006" + - "region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022" + - "\023\n\013split_point\030\002 \001(\014\"\025\n\023SplitRegionRespo" + - "nse\"`\n\024CompactRegionRequest\022)\n\006region\030\001 " + - "\002(\0132\031.hbase.pb.RegionSpecifier\022\r\n\005major\030" + - "\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegionRe", - "sponse\"\315\001\n\031UpdateFavoredNodesRequest\022I\n\013" + - "update_info\030\001 \003(\01324.hbase.pb.UpdateFavor" + - "edNodesRequest.RegionUpdateInfo\032e\n\020Regio" + - "nUpdateInfo\022$\n\006region\030\001 \002(\0132\024.hbase.pb.R" + - "egionInfo\022+\n\rfavored_nodes\030\002 \003(\0132\024.hbase" + - ".pb.ServerName\".\n\032UpdateFavoredNodesResp" + - "onse\022\020\n\010response\030\001 \001(\r\"\244\001\n\023MergeRegionsR" + - "equest\022+\n\010region_a\030\001 \002(\0132\031.hbase.pb.Regi" + - "onSpecifier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb" + - ".RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005fals", - "e\022\032\n\022master_system_time\030\004 \001(\004\"\026\n\024MergeRe" + - "gionsResponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020" + - ".hbase.pb.WALKey\022\027\n\017key_value_bytes\030\002 \003(" + - "\014\022\035\n\025associated_cell_count\030\003 \001(\005\"\242\001\n\030Rep" + - "licateWALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.h" + - "base.pb.WALEntry\022\034\n\024replicationClusterId" + - "\030\002 \001(\t\022\"\n\032sourceBaseNamespaceDirPath\030\003 \001" + - "(\t\022!\n\031sourceHFileArchiveDirPath\030\004 \001(\t\"\033\n" + - "\031ReplicateWALEntryResponse\"\026\n\024RollWALWri" + - "terRequest\"0\n\025RollWALWriterResponse\022\027\n\017r", - "egion_to_flush\030\001 \003(\014\"#\n\021StopServerReques" + - "t\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerResponse\"" + - "\026\n\024GetServerInfoRequest\"K\n\nServerInfo\022)\n" + - "\013server_name\030\001 \002(\0132\024.hbase.pb.ServerName" + - "\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025GetServerInfoRes" + - "ponse\022)\n\013server_info\030\001 \002(\0132\024.hbase.pb.Se" + - "rverInfo\"\034\n\032UpdateConfigurationRequest\"\035" + - "\n\033UpdateConfigurationResponse\"?\n\024GetRegi" + - "onLoadRequest\022\'\n\ntable_name\030\001 \001(\0132\023.hbas" + - "e.pb.TableName\"C\n\025GetRegionLoadResponse\022", - "*\n\014region_loads\030\001 \003(\0132\024.hbase.pb.RegionL" + - "oad2\275\014\n\014AdminService\022P\n\rGetRegionInfo\022\036." 
+ - "hbase.pb.GetRegionInfoRequest\032\037.hbase.pb" + - ".GetRegionInfoResponse\022M\n\014GetStoreFile\022\035" + - ".hbase.pb.GetStoreFileRequest\032\036.hbase.pb" + - ".GetStoreFileResponse\022V\n\017GetOnlineRegion" + - "\022 .hbase.pb.GetOnlineRegionRequest\032!.hba" + - "se.pb.GetOnlineRegionResponse\022G\n\nOpenReg" + - "ion\022\033.hbase.pb.OpenRegionRequest\032\034.hbase" + - ".pb.OpenRegionResponse\022M\n\014WarmupRegion\022\035", - ".hbase.pb.WarmupRegionRequest\032\036.hbase.pb" + - ".WarmupRegionResponse\022J\n\013CloseRegion\022\034.h" + - "base.pb.CloseRegionRequest\032\035.hbase.pb.Cl" + - "oseRegionResponse\022b\n\023CloseRegionForSplit" + - "\022$.hbase.pb.CloseRegionForSplitRequest\032%" + - ".hbase.pb.CloseRegionForSplitResponse\022J\n" + - "\013FlushRegion\022\034.hbase.pb.FlushRegionReque" + - "st\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spl" + - "itRegion\022\034.hbase.pb.SplitRegionRequest\032\035" + - ".hbase.pb.SplitRegionResponse\022P\n\rCompact", - "Region\022\036.hbase.pb.CompactRegionRequest\032\037" + - ".hbase.pb.CompactRegionResponse\022M\n\014Merge" + - "Regions\022\035.hbase.pb.MergeRegionsRequest\032\036" + - ".hbase.pb.MergeRegionsResponse\022\\\n\021Replic" + - "ateWALEntry\022\".hbase.pb.ReplicateWALEntry" + - "Request\032#.hbase.pb.ReplicateWALEntryResp" + - "onse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEn" + - "tryRequest\032#.hbase.pb.ReplicateWALEntryR" + - "esponse\022P\n\rRollWALWriter\022\036.hbase.pb.Roll" + - "WALWriterRequest\032\037.hbase.pb.RollWALWrite", - "rResponse\022P\n\rGetServerInfo\022\036.hbase.pb.Ge" + - "tServerInfoRequest\032\037.hbase.pb.GetServerI" + - "nfoResponse\022G\n\nStopServer\022\033.hbase.pb.Sto" + - "pServerRequest\032\034.hbase.pb.StopServerResp" + - "onse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Up" + - "dateFavoredNodesRequest\032$.hbase.pb.Updat" + - "eFavoredNodesResponse\022b\n\023UpdateConfigura" + - "tion\022$.hbase.pb.UpdateConfigurationReque" + - "st\032%.hbase.pb.UpdateConfigurationRespons" + - "e\022P\n\rGetRegionLoad\022\036.hbase.pb.GetRegionL", - "oadRequest\032\037.hbase.pb.GetRegionLoadRespo" + - "nseBH\n1org.apache.hadoop.hbase.shaded.pr" + - "otobuf.generatedB\013AdminProtosH\001\210\001\001\240\001\001" + "loseRegionResponse\022\016\n\006closed\030\001 \002(\010\"N\n!Cl" + + "oseRegionForSplitOrMergeRequest\022)\n\006regio" + + "n\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\"4\n\"Cl" + + "oseRegionForSplitOrMergeResponse\022\016\n\006clos" + + "ed\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006region", + "\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\030\n\020if_" + + "older_than_ts\030\002 \001(\004\022\036\n\026write_flush_wal_m" + + "arker\030\003 \001(\010\"_\n\023FlushRegionResponse\022\027\n\017la" + + "st_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026" + + "wrote_flush_wal_marker\030\003 \001(\010\"T\n\022SplitReg" + + "ionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + + "gionSpecifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023Sp" + + "litRegionResponse\"`\n\024CompactRegionReques" + + "t\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" + + "fier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025C", + "ompactRegionResponse\"\315\001\n\031UpdateFavoredNo" + + "desRequest\022I\n\013update_info\030\001 \003(\01324.hbase." 
+ + "pb.UpdateFavoredNodesRequest.RegionUpdat" + + "eInfo\032e\n\020RegionUpdateInfo\022$\n\006region\030\001 \002(" + + "\0132\024.hbase.pb.RegionInfo\022+\n\rfavored_nodes" + + "\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032UpdateFa" + + "voredNodesResponse\022\020\n\010response\030\001 \001(\r\"\244\001\n" + + "\023MergeRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031" + + ".hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 " + + "\002(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcib", + "le\030\003 \001(\010:\005false\022\032\n\022master_system_time\030\004 " + + "\001(\004\"\026\n\024MergeRegionsResponse\"a\n\010WALEntry\022" + + "\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n\017key_va" + + "lue_bytes\030\002 \003(\014\022\035\n\025associated_cell_count" + + "\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryRequest\022!\n\005e" + + "ntry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n\024replic" + + "ationClusterId\030\002 \001(\t\022\"\n\032sourceBaseNamesp" + + "aceDirPath\030\003 \001(\t\022!\n\031sourceHFileArchiveDi" + + "rPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryResponse" + + "\"\026\n\024RollWALWriterRequest\"0\n\025RollWALWrite", + "rResponse\022\027\n\017region_to_flush\030\001 \003(\014\"#\n\021St" + + "opServerRequest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopS" + + "erverResponse\"\026\n\024GetServerInfoRequest\"K\n" + + "\nServerInfo\022)\n\013server_name\030\001 \002(\0132\024.hbase" + + ".pb.ServerName\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025Ge" + + "tServerInfoResponse\022)\n\013server_info\030\001 \002(\013" + + "2\024.hbase.pb.ServerInfo\"\034\n\032UpdateConfigur" + + "ationRequest\"\035\n\033UpdateConfigurationRespo" + + "nse\"?\n\024GetRegionLoadRequest\022\'\n\ntable_nam" + + "e\030\001 \001(\0132\023.hbase.pb.TableName\"C\n\025GetRegio", + "nLoadResponse\022*\n\014region_loads\030\001 \003(\0132\024.hb" + + "ase.pb.RegionLoad2\322\014\n\014AdminService\022P\n\rGe" + + "tRegionInfo\022\036.hbase.pb.GetRegionInfoRequ" + + "est\032\037.hbase.pb.GetRegionInfoResponse\022M\n\014" + + "GetStoreFile\022\035.hbase.pb.GetStoreFileRequ" + + "est\032\036.hbase.pb.GetStoreFileResponse\022V\n\017G" + + "etOnlineRegion\022 .hbase.pb.GetOnlineRegio" + + "nRequest\032!.hbase.pb.GetOnlineRegionRespo" + + "nse\022G\n\nOpenRegion\022\033.hbase.pb.OpenRegionR" + + "equest\032\034.hbase.pb.OpenRegionResponse\022M\n\014", + "WarmupRegion\022\035.hbase.pb.WarmupRegionRequ" + + "est\032\036.hbase.pb.WarmupRegionResponse\022J\n\013C" + + "loseRegion\022\034.hbase.pb.CloseRegionRequest" + + "\032\035.hbase.pb.CloseRegionResponse\022w\n\032Close" + + "RegionForSplitOrMerge\022+.hbase.pb.CloseRe" + + "gionForSplitOrMergeRequest\032,.hbase.pb.Cl" + + "oseRegionForSplitOrMergeResponse\022J\n\013Flus" + + "hRegion\022\034.hbase.pb.FlushRegionRequest\032\035." 
+ + "hbase.pb.FlushRegionResponse\022J\n\013SplitReg" + + "ion\022\034.hbase.pb.SplitRegionRequest\032\035.hbas", + "e.pb.SplitRegionResponse\022P\n\rCompactRegio" + + "n\022\036.hbase.pb.CompactRegionRequest\032\037.hbas" + + "e.pb.CompactRegionResponse\022M\n\014MergeRegio" + + "ns\022\035.hbase.pb.MergeRegionsRequest\032\036.hbas" + + "e.pb.MergeRegionsResponse\022\\\n\021ReplicateWA" + + "LEntry\022\".hbase.pb.ReplicateWALEntryReque" + + "st\032#.hbase.pb.ReplicateWALEntryResponse\022" + + "Q\n\006Replay\022\".hbase.pb.ReplicateWALEntryRe" + + "quest\032#.hbase.pb.ReplicateWALEntryRespon" + + "se\022P\n\rRollWALWriter\022\036.hbase.pb.RollWALWr", + "iterRequest\032\037.hbase.pb.RollWALWriterResp" + + "onse\022P\n\rGetServerInfo\022\036.hbase.pb.GetServ" + + "erInfoRequest\032\037.hbase.pb.GetServerInfoRe" + + "sponse\022G\n\nStopServer\022\033.hbase.pb.StopServ" + + "erRequest\032\034.hbase.pb.StopServerResponse\022" + + "_\n\022UpdateFavoredNodes\022#.hbase.pb.UpdateF" + + "avoredNodesRequest\032$.hbase.pb.UpdateFavo" + + "redNodesResponse\022b\n\023UpdateConfiguration\022" + + "$.hbase.pb.UpdateConfigurationRequest\032%." + + "hbase.pb.UpdateConfigurationResponse\022P\n\r", + "GetRegionLoad\022\036.hbase.pb.GetRegionLoadRe" + + "quest\032\037.hbase.pb.GetRegionLoadResponseBH" + + "\n1org.apache.hadoop.hbase.shaded.protobu" + + "f.generatedB\013AdminProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { @@ -27924,17 +28087,17 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CloseRegionResponse_descriptor, new java.lang.String[] { "Closed", }); - internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor = + internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_descriptor = getDescriptor().getMessageTypes().get(12); - internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable = new + internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor, + internal_static_hbase_pb_CloseRegionForSplitOrMergeRequest_descriptor, new java.lang.String[] { "Region", }); - internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor = + internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_descriptor = getDescriptor().getMessageTypes().get(13); - internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable = new + internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor, + internal_static_hbase_pb_CloseRegionForSplitOrMergeResponse_descriptor, new java.lang.String[] { "Closed", }); internal_static_hbase_pb_FlushRegionRequest_descriptor = getDescriptor().getMessageTypes().get(14); diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java index 0be98ff..8f5992c 100644 --- 
hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java +++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java @@ -1833,6 +1833,177 @@ public final class MasterProcedureProtos { } /** + * Protobuf enum {@code hbase.pb.MergeTableRegionsState} + */ + public enum MergeTableRegionsState + implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum { + /** + * MERGE_TABLE_REGIONS_PREPARE = 1; + */ + MERGE_TABLE_REGIONS_PREPARE(1), + /** + * MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 2; + */ + MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS(2), + /** + * MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 3; + */ + MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION(3), + /** + * MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE = 4; + */ + MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE(4), + /** + * MERGE_TABLE_REGIONS_CLOSE_REGIONS = 5; + */ + MERGE_TABLE_REGIONS_CLOSE_REGIONS(5), + /** + * MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 6; + */ + MERGE_TABLE_REGIONS_CREATE_MERGED_REGION(6), + /** + * MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 7; + */ + MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION(7), + /** + * MERGE_TABLE_REGIONS_UPDATE_META = 8; + */ + MERGE_TABLE_REGIONS_UPDATE_META(8), + /** + * MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 9; + */ + MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION(9), + /** + * MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 10; + */ + MERGE_TABLE_REGIONS_OPEN_MERGED_REGION(10), + /** + * MERGE_TABLE_REGIONS_POST_OPERATION = 11; + */ + MERGE_TABLE_REGIONS_POST_OPERATION(11), + ; + + /** + * MERGE_TABLE_REGIONS_PREPARE = 1; + */ + public static final int MERGE_TABLE_REGIONS_PREPARE_VALUE = 1; + /** + * MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 2; + */ + public static final int MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS_VALUE = 2; + /** + * MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 3; + */ + public static final int MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION_VALUE = 3; + /** + * MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE = 4; + */ + public static final int MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE_VALUE = 4; + /** + * MERGE_TABLE_REGIONS_CLOSE_REGIONS = 5; + */ + public static final int MERGE_TABLE_REGIONS_CLOSE_REGIONS_VALUE = 5; + /** + * MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 6; + */ + public static final int MERGE_TABLE_REGIONS_CREATE_MERGED_REGION_VALUE = 6; + /** + * MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 7; + */ + public static final int MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION_VALUE = 7; + /** + * MERGE_TABLE_REGIONS_UPDATE_META = 8; + */ + public static final int MERGE_TABLE_REGIONS_UPDATE_META_VALUE = 8; + /** + * MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 9; + */ + public static final int MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION_VALUE = 9; + /** + * MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 10; + */ + public static final int MERGE_TABLE_REGIONS_OPEN_MERGED_REGION_VALUE = 10; + /** + * MERGE_TABLE_REGIONS_POST_OPERATION = 11; + */ + public static final int MERGE_TABLE_REGIONS_POST_OPERATION_VALUE = 11; + + + public final int getNumber() { + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static MergeTableRegionsState valueOf(int value) { + return forNumber(value); + } + + public static MergeTableRegionsState forNumber(int value) { + switch (value) { + case 1: return MERGE_TABLE_REGIONS_PREPARE; + case 2: return MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS; + case 3: return MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION; + case 4: return MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE; + case 5: return MERGE_TABLE_REGIONS_CLOSE_REGIONS; + case 6: return MERGE_TABLE_REGIONS_CREATE_MERGED_REGION; + case 7: return MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION; + case 8: return MERGE_TABLE_REGIONS_UPDATE_META; + case 9: return MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION; + case 10: return MERGE_TABLE_REGIONS_OPEN_MERGED_REGION; + case 11: return MERGE_TABLE_REGIONS_POST_OPERATION; + default: return null; + } + } + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap< + MergeTableRegionsState> internalValueMap = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap() { + public MergeTableRegionsState findValueByNumber(int number) { + return MergeTableRegionsState.forNumber(number); + } + }; + + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(15); + } + + private static final MergeTableRegionsState[] VALUES = values(); + + public static MergeTableRegionsState valueOf( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private MergeTableRegionsState(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.MergeTableRegionsState) + } + + /** * Protobuf enum {@code hbase.pb.SplitTableRegionState} */ public enum SplitTableRegionState @@ -1850,9 +2021,9 @@ public final class MasterProcedureProtos { */ SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE(3), /** - * SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4; + * SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 4; */ - SPLIT_TABLE_REGION_CLOSED_PARENT_REGION(4), + SPLIT_TABLE_REGION_CLOSE_PARENT_REGION(4), /** * SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5; */ @@ -1892,9 +2063,9 @@ public final class MasterProcedureProtos { */ public static final int SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE_VALUE = 3; /** - * SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4; + * SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 4; */ - public static final int SPLIT_TABLE_REGION_CLOSED_PARENT_REGION_VALUE = 4; + public static final int SPLIT_TABLE_REGION_CLOSE_PARENT_REGION_VALUE = 4; /** * SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5; */ @@ -1938,7 +2109,7 @@ public final class MasterProcedureProtos { case 1: return SPLIT_TABLE_REGION_PREPARE; 
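As the deprecation note above says, numeric lookups on the new MergeTableRegionsState enum should go through forNumber(int), which returns null for values outside the declared 1..11 range; valueOf(int) survives only as a deprecated alias. A small usage sketch, not part of the patch:

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;

MergeTableRegionsState state = MergeTableRegionsState.forNumber(1);
// state == MERGE_TABLE_REGIONS_PREPARE, state.getNumber() == 1
MergeTableRegionsState unknown = MergeTableRegionsState.forNumber(99);
// unknown == null: a caller resuming a persisted procedure must null-check
// before acting on the stored state number.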
case 2: return SPLIT_TABLE_REGION_PRE_OPERATION; case 3: return SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE; - case 4: return SPLIT_TABLE_REGION_CLOSED_PARENT_REGION; + case 4: return SPLIT_TABLE_REGION_CLOSE_PARENT_REGION; case 5: return SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS; case 6: return SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR; case 7: return SPLIT_TABLE_REGION_UPDATE_META; @@ -1971,7 +2142,7 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(15); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(16); } private static final SplitTableRegionState[] VALUES = values(); @@ -2132,7 +2303,7 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(16); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(17); } private static final ServerCrashState[] VALUES = values(); @@ -21109,8 +21280,8 @@ public final class MasterProcedureProtos { } - public interface SplitTableRegionStateDataOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionStateData) + public interface MergeTableRegionsStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsStateData) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { /** @@ -21127,55 +21298,65 @@ public final class MasterProcedureProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); /** - * required .hbase.pb.RegionInfo parent_region_info = 2; + * repeated .hbase.pb.RegionInfo region_info = 2; */ - boolean hasParentRegionInfo(); + java.util.List + getRegionInfoList(); /** - * required .hbase.pb.RegionInfo parent_region_info = 2; + * repeated .hbase.pb.RegionInfo region_info = 2; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo(); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index); /** - * required .hbase.pb.RegionInfo parent_region_info = 2; + * repeated .hbase.pb.RegionInfo region_info = 2; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder(); + int getRegionInfoCount(); + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + java.util.List + getRegionInfoOrBuilderList(); + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index); /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * required .hbase.pb.RegionInfo merged_region_info = 3; */ - java.util.List - getChildRegionInfoList(); + boolean hasMergedRegionInfo(); /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * required .hbase.pb.RegionInfo merged_region_info = 3; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
getMergedRegionInfo(); /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * required .hbase.pb.RegionInfo merged_region_info = 3; */ - int getChildRegionInfoCount(); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getMergedRegionInfoOrBuilder(); + /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * optional bool forcible = 4 [default = false]; */ - java.util.List - getChildRegionInfoOrBuilderList(); + boolean hasForcible(); /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * optional bool forcible = 4 [default = false]; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder( - int index); + boolean getForcible(); } /** - * Protobuf type {@code hbase.pb.SplitTableRegionStateData} + * Protobuf type {@code hbase.pb.MergeTableRegionsStateData} */ - public static final class SplitTableRegionStateData extends + public static final class MergeTableRegionsStateData extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionStateData) - SplitTableRegionStateDataOrBuilder { - // Use SplitTableRegionStateData.newBuilder() to construct. - private SplitTableRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsStateData) + MergeTableRegionsStateDataOrBuilder { + // Use MergeTableRegionsStateData.newBuilder() to construct. + private MergeTableRegionsStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private SplitTableRegionStateData() { - childRegionInfo_ = java.util.Collections.emptyList(); + private MergeTableRegionsStateData() { + regionInfo_ = java.util.Collections.emptyList(); + forcible_ = false; } @java.lang.Override @@ -21183,7 +21364,7 @@ public final class MasterProcedureProtos { getUnknownFields() { return this.unknownFields; } - private SplitTableRegionStateData( + private MergeTableRegionsStateData( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { @@ -21220,25 +21401,30 @@ public final class MasterProcedureProtos { break; } case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + regionInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + regionInfo_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + case 26: { org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = parentRegionInfo_.toBuilder(); + subBuilder = mergedRegionInfo_.toBuilder(); } - parentRegionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + mergedRegionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(parentRegionInfo_); - parentRegionInfo_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(mergedRegionInfo_); + mergedRegionInfo_ = subBuilder.buildPartial(); } 
bitField0_ |= 0x00000002; break; } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - childRegionInfo_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - childRegionInfo_.add( - input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + case 32: { + bitField0_ |= 0x00000004; + forcible_ = input.readBool(); break; } } @@ -21249,8 +21435,8 @@ public final class MasterProcedureProtos { throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - childRegionInfo_ = java.util.Collections.unmodifiableList(childRegionInfo_); + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -21258,14 +21444,14 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_descriptor; + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.Builder.class); } private int bitField0_; @@ -21290,60 +21476,75 @@ public final class MasterProcedureProtos { return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; } - public static final int PARENT_REGION_INFO_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo parentRegionInfo_; + public static final int REGION_INFO_FIELD_NUMBER = 2; + private java.util.List regionInfo_; /** - * required .hbase.pb.RegionInfo parent_region_info = 2; + * repeated .hbase.pb.RegionInfo region_info = 2; */ - public boolean hasParentRegionInfo() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public java.util.List getRegionInfoList() { + return regionInfo_; } /** - * required .hbase.pb.RegionInfo parent_region_info = 2; + * repeated .hbase.pb.RegionInfo region_info = 2; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo() { - return parentRegionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + public java.util.List + getRegionInfoOrBuilderList() { + return regionInfo_; } /** - * required .hbase.pb.RegionInfo parent_region_info = 2; + * repeated .hbase.pb.RegionInfo region_info = 2; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder() { - return parentRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + public int getRegionInfoCount() { + return regionInfo_.size(); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + return regionInfo_.get(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + return regionInfo_.get(index); } - public static final int CHILD_REGION_INFO_FIELD_NUMBER = 3; - private java.util.List childRegionInfo_; + public static final int MERGED_REGION_INFO_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo mergedRegionInfo_; /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * required .hbase.pb.RegionInfo merged_region_info = 3; */ - public java.util.List getChildRegionInfoList() { - return childRegionInfo_; + public boolean hasMergedRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * required .hbase.pb.RegionInfo merged_region_info = 3; */ - public java.util.List - getChildRegionInfoOrBuilderList() { - return childRegionInfo_; + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getMergedRegionInfo() { + return mergedRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_; } /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * required .hbase.pb.RegionInfo merged_region_info = 3; */ - public int getChildRegionInfoCount() { - return childRegionInfo_.size(); + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getMergedRegionInfoOrBuilder() { + return mergedRegionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_; } + + public static final int FORCIBLE_FIELD_NUMBER = 4; + private boolean forcible_; /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * optional bool forcible = 4 [default = false]; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index) { - return childRegionInfo_.get(index); + public boolean hasForcible() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * repeated .hbase.pb.RegionInfo child_region_info = 3; + * optional bool forcible = 4 [default = false]; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder( - int index) { - return childRegionInfo_.get(index); + public boolean getForcible() { + return forcible_; } private byte memoizedIsInitialized = -1; @@ -21356,7 +21557,7 @@ public final class MasterProcedureProtos { memoizedIsInitialized = 0; return false; } - if (!hasParentRegionInfo()) { + if (!hasMergedRegionInfo()) { memoizedIsInitialized = 0; return false; } @@ -21364,16 +21565,16 @@ public final class MasterProcedureProtos { memoizedIsInitialized = 0; return false; } - if (!getParentRegionInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getChildRegionInfoCount(); i++) { - if (!getChildRegionInfo(i).isInitialized()) { + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } + if (!getMergedRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -21383,11 +21584,1252 @@ public final class MasterProcedureProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, getUserInfo()); } + for (int i = 0; i < regionInfo_.size(); i++) { + output.writeMessage(2, regionInfo_.get(i)); + } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, getParentRegionInfo()); + output.writeMessage(3, getMergedRegionInfo()); } - for (int i = 0; i < childRegionInfo_.size(); i++) { - output.writeMessage(3, childRegionInfo_.get(i)); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(4, forcible_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getUserInfo()); + } + for (int i = 0; i < regionInfo_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, regionInfo_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getMergedRegionInfo()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBoolSize(4, forcible_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
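
The isInitialized() check above enforces the proto2 required fields: user_info and merged_region_info must be set, and every region_info entry must itself be initialized. A minimal construction sketch; it assumes the usual required fields of HBaseProtos.RegionInfo (region_id, table_name) and RPCProtos.UserInformation (effective_user), which this patch does not restate, and the demo class and helper are mine:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;

    public final class MergeStateDataDemo {
      // Hypothetical helper: the smallest RegionInfo that satisfies its required fields.
      static HBaseProtos.RegionInfo region(long id, String table) {
        return HBaseProtos.RegionInfo.newBuilder()
            .setRegionId(id)
            .setTableName(HBaseProtos.TableName.newBuilder()
                .setNamespace(ByteString.copyFromUtf8("default"))
                .setQualifier(ByteString.copyFromUtf8(table)))
            .build();
      }
      public static void main(String[] args) {
        MergeTableRegionsStateData data = MergeTableRegionsStateData.newBuilder()
            .setUserInfo(RPCProtos.UserInformation.newBuilder().setEffectiveUser("hbase"))
            .addRegionInfo(region(1L, "t1"))       // the regions being merged
            .addRegionInfo(region(2L, "t1"))
            .setMergedRegionInfo(region(3L, "t1")) // required, per isInitialized() above
            .setForcible(true)
            .build(); // build() throws if user_info or merged_region_info is unset
        System.out.println(data.getRegionInfoCount()); // 2
      }
    }
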
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && getRegionInfoList() + .equals(other.getRegionInfoList()); + result = result && (hasMergedRegionInfo() == other.hasMergedRegionInfo()); + if (hasMergedRegionInfo()) { + result = result && getMergedRegionInfo() + .equals(other.getMergedRegionInfo()); + } + result = result && (hasForcible() == other.hasForcible()); + if (hasForcible()) { + result = result && (getForcible() + == other.getForcible()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (getRegionInfoCount() > 0) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoList().hashCode(); + } + if (hasMergedRegionInfo()) { + hash = (37 * hash) + MERGED_REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getMergedRegionInfo().hashCode(); + } + if (hasForcible()) { + hash = (37 * hash) + FORCIBLE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( + getForcible()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
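
The parseFrom family above pairs with the standard GeneratedMessageV3 serializers to give a lossless round trip; the delimited variants add a varint length prefix so several messages can share one stream. A small sketch (the helper class and method names are mine; toByteArray and writeDelimitedTo are standard protobuf message API, not introduced by this patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData;

    public final class RoundTripDemo {
      static MergeTableRegionsStateData roundTrip(MergeTableRegionsStateData data) throws IOException {
        byte[] wire = data.toByteArray();                  // fields written in number order 1..4
        return MergeTableRegionsStateData.parseFrom(wire); // equals(data) holds afterwards
      }
      static MergeTableRegionsStateData delimitedRoundTrip(MergeTableRegionsStateData data) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        data.writeDelimitedTo(out);                        // varint length prefix, then the bytes
        return MergeTableRegionsStateData.parseDelimitedFrom(
            new ByteArrayInputStream(out.toByteArray()));
      }
    }
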
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MergeTableRegionsStateData} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.MergeTableRegionsStateData) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateDataOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getRegionInfoFieldBuilder(); + getMergedRegionInfoFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = null; + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + regionInfoBuilder_.clear(); + } + if (mergedRegionInfoBuilder_ == null) { + mergedRegionInfo_ = null; + } else { + mergedRegionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + forcible_ = false; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData result = buildPartial(); + if (!result.isInitialized()) { + 
throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + if (mergedRegionInfoBuilder_ == null) { + result.mergedRegionInfo_ = mergedRegionInfo_; + } else { + result.mergedRegionInfo_ = mergedRegionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.forcible_ = forcible_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (regionInfoBuilder_ == null) { + if (!other.regionInfo_.isEmpty()) { + if (regionInfo_.isEmpty()) { + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRegionInfoIsMutable(); + regionInfo_.addAll(other.regionInfo_); + } + onChanged(); + } + } else { + if 
(!other.regionInfo_.isEmpty()) { + if (regionInfoBuilder_.isEmpty()) { + regionInfoBuilder_.dispose(); + regionInfoBuilder_ = null; + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000002); + regionInfoBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getRegionInfoFieldBuilder() : null; + } else { + regionInfoBuilder_.addAllMessages(other.regionInfo_); + } + } + } + if (other.hasMergedRegionInfo()) { + mergeMergedRegionInfo(other.getMergedRegionInfo()); + } + if (other.hasForcible()) { + setForcible(other.getForcible()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + return false; + } + if (!hasMergedRegionInfo()) { + return false; + } + if (!getUserInfo().isInitialized()) { + return false; + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + return false; + } + } + if (!getMergedRegionInfo().isInitialized()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation userInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_ == null ? 
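
Two Builder behaviors shown above are easy to trip over: build() throws for unset required fields while buildPartial() returns the message anyway, and mergeFrom(other) concatenates the repeated region_info list while letting other's singular fields win where set. A minimal sketch, assuming two already-built messages (the class and method names are mine):

    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData;

    final class BuilderSemanticsDemo {
      // buildPartial() skips the required-field check that build() enforces.
      static MergeTableRegionsStateData partial() {
        MergeTableRegionsStateData p =
            MergeTableRegionsStateData.newBuilder().setForcible(true).buildPartial();
        assert !p.isInitialized(); // user_info and merged_region_info are unset
        return p;
      }
      // Per the generated mergeFrom above: b's region_info entries are appended
      // to a's, and b's singular fields overwrite or merge into a's when set.
      static MergeTableRegionsStateData combine(MergeTableRegionsStateData a,
          MergeTableRegionsStateData b) {
        return a.toBuilder().mergeFrom(b).buildPartial();
      }
    }
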
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != null && + userInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = null; + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + getUserInfo(), + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + private java.util.List regionInfo_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + regionInfo_ = new java.util.ArrayList(regionInfo_); + bitField0_ |= 0x00000002; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public java.util.List getRegionInfoList() { + if (regionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfo_); + } else { + return regionInfoBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public int getRegionInfoCount() { + if (regionInfoBuilder_ == null) { + return regionInfo_.size(); + } else { + return regionInfoBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); + } else { + return regionInfoBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.set(index, value); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder addRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { 
+ if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(index, value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder addRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder addAllRegionInfo( + java.lang.Iterable values) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, regionInfo_); + onChanged(); + } else { + regionInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public Builder removeRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.remove(index); + onChanged(); + } else { + regionInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); } else { + return regionInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public java.util.List + getRegionInfoOrBuilderList() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionInfo_); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder 
addRegionInfoBuilder() { + return getRegionInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 2; + */ + public java.util.List + getRegionInfoBuilderList() { + return getRegionInfoFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo mergedRegionInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> mergedRegionInfoBuilder_; + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public boolean hasMergedRegionInfo() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getMergedRegionInfo() { + if (mergedRegionInfoBuilder_ == null) { + return mergedRegionInfo_ == null ? 
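
The RepeatedFieldBuilderV3 plumbing above exists so callers can restructure the region_info list without rebuilding each element. A sketch of in-place editing (the class and method names are mine, and setOffline assumes RegionInfo's optional offline field from HBaseProtos):

    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData;

    final class RegionListEditDemo {
      static MergeTableRegionsStateData dropFirstAndOfflineRest(MergeTableRegionsStateData data) {
        MergeTableRegionsStateData.Builder b = data.toBuilder();
        b.removeRegionInfo(0);                        // backed by the plain list or the field builder, as above
        for (int i = 0; i < b.getRegionInfoCount(); i++) {
          b.getRegionInfoBuilder(i).setOffline(true); // in-place edit via the nested builder
        }
        return b.buildPartial();
      }
    }
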
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_; + } else { + return mergedRegionInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public Builder setMergedRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (mergedRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mergedRegionInfo_ = value; + onChanged(); + } else { + mergedRegionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public Builder setMergedRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (mergedRegionInfoBuilder_ == null) { + mergedRegionInfo_ = builderForValue.build(); + onChanged(); + } else { + mergedRegionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public Builder mergeMergedRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (mergedRegionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + mergedRegionInfo_ != null && + mergedRegionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + mergedRegionInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(mergedRegionInfo_).mergeFrom(value).buildPartial(); + } else { + mergedRegionInfo_ = value; + } + onChanged(); + } else { + mergedRegionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public Builder clearMergedRegionInfo() { + if (mergedRegionInfoBuilder_ == null) { + mergedRegionInfo_ = null; + onChanged(); + } else { + mergedRegionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getMergedRegionInfoBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getMergedRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getMergedRegionInfoOrBuilder() { + if (mergedRegionInfoBuilder_ != null) { + return mergedRegionInfoBuilder_.getMessageOrBuilder(); + } else { + return mergedRegionInfo_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_; + } + } + /** + * required .hbase.pb.RegionInfo merged_region_info = 3; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getMergedRegionInfoFieldBuilder() { + if (mergedRegionInfoBuilder_ == null) { + mergedRegionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + getMergedRegionInfo(), + getParentForChildren(), + isClean()); + mergedRegionInfo_ = null; + } + return mergedRegionInfoBuilder_; + } + + private boolean forcible_ ; + /** + * optional bool forcible = 4 [default = false]; + */ + public boolean hasForcible() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool forcible = 4 [default = false]; + */ + public boolean getForcible() { + return forcible_; + } + /** + * optional bool forcible = 4 [default = false]; + */ + public Builder setForcible(boolean value) { + bitField0_ |= 0x00000008; + forcible_ = value; + onChanged(); + return this; + } + /** + * optional bool forcible = 4 [default = false]; + */ + public Builder clearForcible() { + bitField0_ = (bitField0_ & ~0x00000008); + forcible_ = false; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.MergeTableRegionsStateData) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MergeTableRegionsStateData) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public MergeTableRegionsStateData parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new MergeTableRegionsStateData(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser 
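
The forcible accessors above implement proto2 optional-with-default semantics: the default instance reports false for both hasForcible() and getForcible(), and only an explicit setForcible() flips hasForcible() to true, even when the value set equals the declared default. A tiny sketch (ForcibleDefaultDemo is mine):

    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData;

    public final class ForcibleDefaultDemo {
      public static void main(String[] args) {
        MergeTableRegionsStateData d = MergeTableRegionsStateData.getDefaultInstance();
        System.out.println(d.hasForcible()); // false: never set
        System.out.println(d.getForcible()); // false: the declared default
        MergeTableRegionsStateData.Builder b = d.toBuilder().setForcible(false);
        System.out.println(b.hasForcible()); // true: explicitly set, so it will be serialized
      }
    }
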
getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface SplitTableRegionStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionStateData) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + /** + * required .hbase.pb.RegionInfo parent_region_info = 2; + */ + boolean hasParentRegionInfo(); + /** + * required .hbase.pb.RegionInfo parent_region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo(); + /** + * required .hbase.pb.RegionInfo parent_region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder(); + + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + java.util.List + getChildRegionInfoList(); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + int getChildRegionInfoCount(); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + java.util.List + getChildRegionInfoOrBuilderList(); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.SplitTableRegionStateData} + */ + public static final class SplitTableRegionStateData extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionStateData) + SplitTableRegionStateDataOrBuilder { + // Use SplitTableRegionStateData.newBuilder() to construct. 
+ private SplitTableRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SplitTableRegionStateData() { + childRegionInfo_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SplitTableRegionStateData( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = parentRegionInfo_.toBuilder(); + } + parentRegionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parentRegionInfo_); + parentRegionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + childRegionInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + childRegionInfo_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + childRegionInfo_ = java.util.Collections.unmodifiableList(childRegionInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.Builder.class); + } + + private int bitField0_; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } + + public static final int PARENT_REGION_INFO_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo parentRegionInfo_; + /** + * required .hbase.pb.RegionInfo parent_region_info = 2; + */ + public boolean hasParentRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo() { + return parentRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder() { + return parentRegionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + } + + public static final int CHILD_REGION_INFO_FIELD_NUMBER = 3; + private java.util.List childRegionInfo_; + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + public java.util.List getChildRegionInfoList() { + return childRegionInfo_; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + public java.util.List + getChildRegionInfoOrBuilderList() { + return childRegionInfo_; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + public int getChildRegionInfoCount() { + return childRegionInfo_.size(); + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index) { + return childRegionInfo_.get(index); + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder( + int index) { + return childRegionInfo_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasParentRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getParentRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getChildRegionInfoCount(); i++) { + if (!getChildRegionInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getUserInfo()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getParentRegionInfo()); + } + for (int i = 0; i < childRegionInfo_.size(); i++) { + output.writeMessage(3, childRegionInfo_.get(i)); } unknownFields.writeTo(output); } @@ -23985,6 +25427,11 @@ public final class MasterProcedureProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_DispatchMergingRegionsStateData_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MergeTableRegionsStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_SplitTableRegionStateData_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -24082,124 +25529,141 @@ public final class MasterProcedureProtos { "e.pb.UserInformation\022\'\n\ntable_name\030\002 \002(\013" + "2\023.hbase.pb.TableName\022)\n\013region_info\030\003 \003" + "(\0132\024.hbase.pb.RegionInfo\022\020\n\010forcible\030\004 \001" + - 
"(\010\"\254\001\n\031SplitTableRegionStateData\022,\n\tuser", - "_info\030\001 \002(\0132\031.hbase.pb.UserInformation\0220" + - "\n\022parent_region_info\030\002 \002(\0132\024.hbase.pb.Re" + - "gionInfo\022/\n\021child_region_info\030\003 \003(\0132\024.hb" + - "ase.pb.RegionInfo\"\201\002\n\024ServerCrashStateDa" + - "ta\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.Serve" + - "rName\022\036\n\026distributed_log_replay\030\002 \001(\010\0227\n" + - "\031regions_on_crashed_server\030\003 \003(\0132\024.hbase" + - ".pb.RegionInfo\022.\n\020regions_assigned\030\004 \003(\013" + - "2\024.hbase.pb.RegionInfo\022\025\n\rcarrying_meta\030" + - "\005 \001(\010\022\036\n\020should_split_wal\030\006 \001(\010:\004true*\330\001", - "\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE_O" + - "PERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYO" + - "UT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033CR" + - "EATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TA" + - "BLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_" + - "POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022\030\n" + - "\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_" + - "PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE_T" + - "ABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE" + - "_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE", - "_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERAT" + - "ION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIONS" + - "\020\007*\212\002\n\022TruncateTableState\022 \n\034TRUNCATE_TA" + - "BLE_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_RE" + - "MOVE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEAR" + - "_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_FS" + - "_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META\020" + - "\005\022!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n\035" + - "TRUNCATE_TABLE_POST_OPERATION\020\007*\337\001\n\020Dele" + - "teTableState\022\036\n\032DELETE_TABLE_PRE_OPERATI", - "ON\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020\002\022" + - " \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DEL" + - "ETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_" + - "TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE" + - "_POST_OPERATION\020\006*\320\001\n\024CreateNamespaceSta" + - "te\022\034\n\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CREA" + - "TE_NAMESPACE_CREATE_DIRECTORY\020\002\022)\n%CREAT" + - "E_NAMESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032CR" + - "EATE_NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_NAM" + - "ESPACE_SET_NAMESPACE_QUOTA\020\005*z\n\024ModifyNa", - "mespaceState\022\034\n\030MODIFY_NAMESPACE_PREPARE" + - "\020\001\022$\n MODIFY_NAMESPACE_UPDATE_NS_TABLE\020\002" + - "\022\036\n\032MODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024Del" + - "eteNamespaceState\022\034\n\030DELETE_NAMESPACE_PR" + - "EPARE\020\001\022)\n%DELETE_NAMESPACE_DELETE_FROM_" + - "NS_TABLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_FR" + - "OM_ZK\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIREC" + - "TORIES\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NAME" + - "SPACE_QUOTA\020\005*\331\001\n\024AddColumnFamilyState\022\035" + - "\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_COL", - "UMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLUMN" + - "_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n ADD" + - "_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD_C" + - "OLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027Mo" + - "difyColumnFamilyState\022 \n\034MODIFY_COLUMN_F" + - 
"AMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMILY_" + - "PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMILY_" + - "UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_COLU" + - "MN_FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_COL" + - "UMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027Dele", - "teColumnFamilyState\022 \n\034DELETE_COLUMN_FAM" + - "ILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_PR" + - "E_OPERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_UP" + - "DATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLUMN" + - "_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_COL" + - "UMN_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_CO" + - "LUMN_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020Ena" + - "bleTableState\022\030\n\024ENABLE_TABLE_PREPARE\020\001\022" + - "\036\n\032ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENABL" + - "E_TABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n EN", - "ABLE_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$ENAB" + - "LE_TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033EN" + - "ABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021DisableT" + - "ableState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037\n\033" + - "DISABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISABLE" + - "_TABLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"DI" + - "SABLE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&DI" + - "SABLE_TABLE_SET_DISABLED_TABLE_STATE\020\005\022 " + - "\n\034DISABLE_TABLE_POST_OPERATION\020\006*\346\001\n\022Clo" + - "neSnapshotState\022 \n\034CLONE_SNAPSHOT_PRE_OP", - "ERATION\020\001\022\"\n\036CLONE_SNAPSHOT_WRITE_FS_LAY" + - "OUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_TO_META\020\003\022!\n" + - "\035CLONE_SNAPSHOT_ASSIGN_REGIONS\020\004\022$\n CLON" + - "E_SNAPSHOT_UPDATE_DESC_CACHE\020\005\022!\n\035CLONE_" + - "SNAPSHOT_POST_OPERATION\020\006*\260\001\n\024RestoreSna" + - "pshotState\022\"\n\036RESTORE_SNAPSHOT_PRE_OPERA" + - "TION\020\001\022,\n(RESTORE_SNAPSHOT_UPDATE_TABLE_" + - "DESCRIPTOR\020\002\022$\n RESTORE_SNAPSHOT_WRITE_F" + - "S_LAYOUT\020\003\022 \n\034RESTORE_SNAPSHOT_UPDATE_ME" + - "TA\020\004*\376\001\n\033DispatchMergingRegionsState\022$\n ", - "DISPATCH_MERGING_REGIONS_PREPARE\020\001\022*\n&DI" + - "SPATCH_MERGING_REGIONS_PRE_OPERATION\020\002\0223" + - "\n/DISPATCH_MERGING_REGIONS_MOVE_REGION_T" + - "O_SAME_RS\020\003\022+\n\'DISPATCH_MERGING_REGIONS_" + - "DO_MERGE_IN_RS\020\004\022+\n\'DISPATCH_MERGING_REG" + - "IONS_POST_OPERATION\020\005*\305\003\n\025SplitTableRegi" + - "onState\022\036\n\032SPLIT_TABLE_REGION_PREPARE\020\001\022" + - "$\n SPLIT_TABLE_REGION_PRE_OPERATION\020\002\0220\n" + - ",SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_" + - "STATE\020\003\022+\n\'SPLIT_TABLE_REGION_CLOSED_PAR", - "ENT_REGION\020\004\022.\n*SPLIT_TABLE_REGION_CREAT" + - "E_DAUGHTER_REGIONS\020\005\0220\n,SPLIT_TABLE_REGI" + - "ON_PRE_OPERATION_BEFORE_PONR\020\006\022\"\n\036SPLIT_" + - "TABLE_REGION_UPDATE_META\020\007\022/\n+SPLIT_TABL" + - "E_REGION_PRE_OPERATION_AFTER_PONR\020\010\022)\n%S" + - "PLIT_TABLE_REGION_OPEN_CHILD_REGIONS\020\t\022%" + - "\n!SPLIT_TABLE_REGION_POST_OPERATION\020\n*\234\002" + - "\n\020ServerCrashState\022\026\n\022SERVER_CRASH_START" + - "\020\001\022\035\n\031SERVER_CRASH_PROCESS_META\020\002\022\034\n\030SER" + - "VER_CRASH_GET_REGIONS\020\003\022\036\n\032SERVER_CRASH_", - "NO_SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH_SPLIT_LO" + - "GS\020\005\022#\n\037SERVER_CRASH_PREPARE_LOG_REPLAY\020" + - "\006\022\027\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033SERVER_CRA" + - "SH_WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_CRASH_FINI" + - 
"SH\020dBR\n1org.apache.hadoop.hbase.shaded.p" + - "rotobuf.generatedB\025MasterProcedureProtos" + - "H\001\210\001\001\240\001\001" + "(\010\"\300\001\n\032MergeTableRegionsStateData\022,\n\tuse", + "r_info\030\001 \002(\0132\031.hbase.pb.UserInformation\022" + + ")\n\013region_info\030\002 \003(\0132\024.hbase.pb.RegionIn" + + "fo\0220\n\022merged_region_info\030\003 \002(\0132\024.hbase.p" + + "b.RegionInfo\022\027\n\010forcible\030\004 \001(\010:\005false\"\254\001" + + "\n\031SplitTableRegionStateData\022,\n\tuser_info" + + "\030\001 \002(\0132\031.hbase.pb.UserInformation\0220\n\022par" + + "ent_region_info\030\002 \002(\0132\024.hbase.pb.RegionI" + + "nfo\022/\n\021child_region_info\030\003 \003(\0132\024.hbase.p" + + "b.RegionInfo\"\201\002\n\024ServerCrashStateData\022)\n" + + "\013server_name\030\001 \002(\0132\024.hbase.pb.ServerName", + "\022\036\n\026distributed_log_replay\030\002 \001(\010\0227\n\031regi" + + "ons_on_crashed_server\030\003 \003(\0132\024.hbase.pb.R" + + "egionInfo\022.\n\020regions_assigned\030\004 \003(\0132\024.hb" + + "ase.pb.RegionInfo\022\025\n\rcarrying_meta\030\005 \001(\010" + + "\022\036\n\020should_split_wal\030\006 \001(\010:\004true*\330\001\n\020Cre" + + "ateTableState\022\036\n\032CREATE_TABLE_PRE_OPERAT" + + "ION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022" + + "\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_" + + "TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_U" + + "PDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_", + "OPERATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODI" + + "FY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_O" + + "PERATION\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_" + + "DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPL" + + "ICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_L" + + "AYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATION\020\006" + + "\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007*\212\002" + + "\n\022TruncateTableState\022 \n\034TRUNCATE_TABLE_P" + + "RE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_REMOVE_" + + "FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEAR_FS_L", + "AYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_FS_LAYO" + + "UT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META\020\005\022!\n\035" + + "TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n\035TRUNC" + + "ATE_TABLE_POST_OPERATION\020\007*\337\001\n\020DeleteTab" + + "leState\022\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022" + + "!\n\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n\034DE" + + "LETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_T" + + "ABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE" + + "_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST" + + "_OPERATION\020\006*\320\001\n\024CreateNamespaceState\022\034\n", + "\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CREATE_NA" + + "MESPACE_CREATE_DIRECTORY\020\002\022)\n%CREATE_NAM" + + "ESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032CREATE_" + + "NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_NAMESPAC" + + "E_SET_NAMESPACE_QUOTA\020\005*z\n\024ModifyNamespa" + + "ceState\022\034\n\030MODIFY_NAMESPACE_PREPARE\020\001\022$\n" + + " MODIFY_NAMESPACE_UPDATE_NS_TABLE\020\002\022\036\n\032M" + + "ODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024DeleteNa" + + "mespaceState\022\034\n\030DELETE_NAMESPACE_PREPARE" + + "\020\001\022)\n%DELETE_NAMESPACE_DELETE_FROM_NS_TA", + "BLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_FROM_ZK" + + "\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIRECTORIE" + + 
"S\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NAMESPACE" + + "_QUOTA\020\005*\331\001\n\024AddColumnFamilyState\022\035\n\031ADD" + + "_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_COLUMN_F" + + "AMILY_PRE_OPERATION\020\002\022-\n)ADD_COLUMN_FAMI" + + "LY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n ADD_COLU" + + "MN_FAMILY_POST_OPERATION\020\004\022(\n$ADD_COLUMN" + + "_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027ModifyC" + + "olumnFamilyState\022 \n\034MODIFY_COLUMN_FAMILY", + "_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMILY_PRE_O" + + "PERATION\020\002\0220\n,MODIFY_COLUMN_FAMILY_UPDAT" + + "E_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_COLUMN_FA" + + "MILY_POST_OPERATION\020\004\022+\n\'MODIFY_COLUMN_F" + + "AMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027DeleteCol" + + "umnFamilyState\022 \n\034DELETE_COLUMN_FAMILY_P" + + "REPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_PRE_OPE" + + "RATION\020\002\0220\n,DELETE_COLUMN_FAMILY_UPDATE_" + + "TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLUMN_FAMI" + + "LY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_COLUMN_F", + "AMILY_POST_OPERATION\020\005\022+\n\'DELETE_COLUMN_" + + "FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020EnableTa" + + "bleState\022\030\n\024ENABLE_TABLE_PREPARE\020\001\022\036\n\032EN" + + "ABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENABLE_TAB" + + "LE_SET_ENABLING_TABLE_STATE\020\003\022$\n ENABLE_" + + "TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$ENABLE_TA" + + "BLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033ENABLE_" + + "TABLE_POST_OPERATION\020\006*\362\001\n\021DisableTableS" + + "tate\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037\n\033DISAB" + + "LE_TABLE_PRE_OPERATION\020\002\022+\n\'DISABLE_TABL", + "E_SET_DISABLING_TABLE_STATE\020\003\022&\n\"DISABLE" + + "_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&DISABLE" + + "_TABLE_SET_DISABLED_TABLE_STATE\020\005\022 \n\034DIS" + + "ABLE_TABLE_POST_OPERATION\020\006*\346\001\n\022CloneSna" + + "pshotState\022 \n\034CLONE_SNAPSHOT_PRE_OPERATI" + + "ON\020\001\022\"\n\036CLONE_SNAPSHOT_WRITE_FS_LAYOUT\020\002" + + "\022\036\n\032CLONE_SNAPSHOT_ADD_TO_META\020\003\022!\n\035CLON" + + "E_SNAPSHOT_ASSIGN_REGIONS\020\004\022$\n CLONE_SNA" + + "PSHOT_UPDATE_DESC_CACHE\020\005\022!\n\035CLONE_SNAPS" + + "HOT_POST_OPERATION\020\006*\260\001\n\024RestoreSnapshot", + "State\022\"\n\036RESTORE_SNAPSHOT_PRE_OPERATION\020" + + "\001\022,\n(RESTORE_SNAPSHOT_UPDATE_TABLE_DESCR" + + "IPTOR\020\002\022$\n RESTORE_SNAPSHOT_WRITE_FS_LAY" + + "OUT\020\003\022 \n\034RESTORE_SNAPSHOT_UPDATE_META\020\004*" + + "\376\001\n\033DispatchMergingRegionsState\022$\n DISPA" + + "TCH_MERGING_REGIONS_PREPARE\020\001\022*\n&DISPATC" + + "H_MERGING_REGIONS_PRE_OPERATION\020\002\0223\n/DIS" + + "PATCH_MERGING_REGIONS_MOVE_REGION_TO_SAM" + + "E_RS\020\003\022+\n\'DISPATCH_MERGING_REGIONS_DO_ME" + + "RGE_IN_RS\020\004\022+\n\'DISPATCH_MERGING_REGIONS_", + "POST_OPERATION\020\005*\376\003\n\026MergeTableRegionsSt" + + "ate\022\037\n\033MERGE_TABLE_REGIONS_PREPARE\020\001\022.\n*" + + "MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_" + + "RS\020\002\022+\n\'MERGE_TABLE_REGIONS_PRE_MERGE_OP" + + "ERATION\020\003\022/\n+MERGE_TABLE_REGIONS_SET_MER" + + "GING_TABLE_STATE\020\004\022%\n!MERGE_TABLE_REGION" + + "S_CLOSE_REGIONS\020\005\022,\n(MERGE_TABLE_REGIONS" + + "_CREATE_MERGED_REGION\020\006\0222\n.MERGE_TABLE_R" + + "EGIONS_PRE_MERGE_COMMIT_OPERATION\020\007\022#\n\037M" + + "ERGE_TABLE_REGIONS_UPDATE_META\020\010\0223\n/MERG", + "E_TABLE_REGIONS_POST_MERGE_COMMIT_OPERAT" + + "ION\020\t\022*\n&MERGE_TABLE_REGIONS_OPEN_MERGED" + + 
"_REGION\020\n\022&\n\"MERGE_TABLE_REGIONS_POST_OP" + + "ERATION\020\013*\304\003\n\025SplitTableRegionState\022\036\n\032S" + + "PLIT_TABLE_REGION_PREPARE\020\001\022$\n SPLIT_TAB" + + "LE_REGION_PRE_OPERATION\020\002\0220\n,SPLIT_TABLE" + + "_REGION_SET_SPLITTING_TABLE_STATE\020\003\022*\n&S" + + "PLIT_TABLE_REGION_CLOSE_PARENT_REGION\020\004\022" + + ".\n*SPLIT_TABLE_REGION_CREATE_DAUGHTER_RE" + + "GIONS\020\005\0220\n,SPLIT_TABLE_REGION_PRE_OPERAT", + "ION_BEFORE_PONR\020\006\022\"\n\036SPLIT_TABLE_REGION_" + + "UPDATE_META\020\007\022/\n+SPLIT_TABLE_REGION_PRE_" + + "OPERATION_AFTER_PONR\020\010\022)\n%SPLIT_TABLE_RE" + + "GION_OPEN_CHILD_REGIONS\020\t\022%\n!SPLIT_TABLE" + + "_REGION_POST_OPERATION\020\n*\234\002\n\020ServerCrash" + + "State\022\026\n\022SERVER_CRASH_START\020\001\022\035\n\031SERVER_" + + "CRASH_PROCESS_META\020\002\022\034\n\030SERVER_CRASH_GET" + + "_REGIONS\020\003\022\036\n\032SERVER_CRASH_NO_SPLIT_LOGS" + + "\020\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020\005\022#\n\037SERVE" + + "R_CRASH_PREPARE_LOG_REPLAY\020\006\022\027\n\023SERVER_C", + "RASH_ASSIGN\020\010\022\037\n\033SERVER_CRASH_WAIT_ON_AS" + + "SIGN\020\t\022\027\n\023SERVER_CRASH_FINISH\020dBR\n1org.a" + + "pache.hadoop.hbase.shaded.protobuf.gener" + + "atedB\025MasterProcedureProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { @@ -24311,14 +25775,20 @@ public final class MasterProcedureProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DispatchMergingRegionsStateData_descriptor, new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", "Forcible", }); - internal_static_hbase_pb_SplitTableRegionStateData_descriptor = + internal_static_hbase_pb_MergeTableRegionsStateData_descriptor = getDescriptor().getMessageTypes().get(16); + internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_MergeTableRegionsStateData_descriptor, + new java.lang.String[] { "UserInfo", "RegionInfo", "MergedRegionInfo", "Forcible", }); + internal_static_hbase_pb_SplitTableRegionStateData_descriptor = + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SplitTableRegionStateData_descriptor, new java.lang.String[] { "UserInfo", "ParentRegionInfo", "ChildRegionInfo", }); internal_static_hbase_pb_ServerCrashStateData_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ServerCrashStateData_descriptor, diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java index 03ef208..56442d1 100644 --- hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java +++ 
hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java @@ -7053,6 +7053,1504 @@ public final class MasterProtos { } + public interface MergeTableRegionsRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + java.util.List + getRegionList(); + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index); + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + int getRegionCount(); + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + java.util.List + getRegionOrBuilderList(); + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index); + + /** + * optional bool forcible = 3 [default = false]; + */ + boolean hasForcible(); + /** + * optional bool forcible = 3 [default = false]; + */ + boolean getForcible(); + + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + boolean hasNonceGroup(); + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + long getNonceGroup(); + + /** + * optional uint64 nonce = 5 [default = 0]; + */ + boolean hasNonce(); + /** + * optional uint64 nonce = 5 [default = 0]; + */ + long getNonce(); + } + /** + *
+   * <pre>
+   **
+   * Merging the specified regions in a table.
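+   *
+   * A minimal construction sketch (illustrative only; encodedNameA,
+   * encodedNameB and the chosen RegionSpecifierType are assumptions,
+   * not defined by this message):
+   *
+   *   MergeTableRegionsRequest request = MergeTableRegionsRequest.newBuilder()
+   *       .addRegion(RegionSpecifier.newBuilder()
+   *           .setType(RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
+   *           .setValue(ByteString.copyFrom(encodedNameA)))
+   *       .addRegion(RegionSpecifier.newBuilder()
+   *           .setType(RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
+   *           .setValue(ByteString.copyFrom(encodedNameB)))
+   *       .setForcible(false)
+   *       .build();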
+   * </pre>
+ * + * Protobuf type {@code hbase.pb.MergeTableRegionsRequest} + */ + public static final class MergeTableRegionsRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsRequest) + MergeTableRegionsRequestOrBuilder { + // Use MergeTableRegionsRequest.newBuilder() to construct. + private MergeTableRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private MergeTableRegionsRequest() { + region_ = java.util.Collections.emptyList(); + forcible_ = false; + nonceGroup_ = 0L; + nonce_ = 0L; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MergeTableRegionsRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + region_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry)); + break; + } + case 24: { + bitField0_ |= 0x00000001; + forcible_ = input.readBool(); + break; + } + case 32: { + bitField0_ |= 0x00000002; + nonceGroup_ = input.readUInt64(); + break; + } + case 40: { + bitField0_ |= 0x00000004; + nonce_ = input.readUInt64(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + region_ = java.util.Collections.unmodifiableList(region_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class); + } + + private int bitField0_; + public static final int 
REGION_FIELD_NUMBER = 1; + private java.util.List region_; + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public java.util.List getRegionList() { + return region_; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public java.util.List + getRegionOrBuilderList() { + return region_; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public int getRegionCount() { + return region_.size(); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + return region_.get(index); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + return region_.get(index); + } + + public static final int FORCIBLE_FIELD_NUMBER = 3; + private boolean forcible_; + /** + * optional bool forcible = 3 [default = false]; + */ + public boolean hasForcible() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bool forcible = 3 [default = false]; + */ + public boolean getForcible() { + return forcible_; + } + + public static final int NONCE_GROUP_FIELD_NUMBER = 4; + private long nonceGroup_; + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + + public static final int NONCE_FIELD_NUMBER = 5; + private long nonce_; + /** + * optional uint64 nonce = 5 [default = 0]; + */ + public boolean hasNonce() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 nonce = 5 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < region_.size(); i++) { + output.writeMessage(1, region_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(3, forcible_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(4, nonceGroup_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(5, nonce_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < region_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBoolSize(3, forcible_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, nonceGroup_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, nonce_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) obj; + + boolean result = true; + result = result && getRegionList() + .equals(other.getRegionList()); + result = result && (hasForcible() == other.hasForcible()); + if (hasForcible()) { + result = result && (getForcible() + == other.getForcible()); + } + result = result && (hasNonceGroup() == other.hasNonceGroup()); + if (hasNonceGroup()) { + result = result && (getNonceGroup() + == other.getNonceGroup()); + } + result = result && (hasNonce() == other.hasNonce()); + if (hasNonce()) { + result = result && (getNonce() + == other.getNonce()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getRegionCount() > 0) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegionList().hashCode(); + } + if (hasForcible()) { + hash = (37 * hash) + FORCIBLE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( + getForcible()); + } + if (hasNonceGroup()) { + hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getNonceGroup()); + } + if (hasNonce()) { + hash = (37 * hash) + NONCE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getNonce()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     * <pre>
+     **
+     * Merging the specified regions in a table.
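+     *
+     * Illustrative sketch (specs, nonceGroup and nonce are assumed inputs):
+     * the request can also be assembled incrementally before build():
+     *
+     *   MergeTableRegionsRequest.Builder builder = MergeTableRegionsRequest.newBuilder();
+     *   for (RegionSpecifier spec : specs) {
+     *     builder.addRegion(spec);
+     *   }
+     *   builder.setNonceGroup(nonceGroup).setNonce(nonce);
+     *   MergeTableRegionsRequest request = builder.build();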
+     * </pre>
+ * + * Protobuf type {@code hbase.pb.MergeTableRegionsRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.MergeTableRegionsRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + regionBuilder_.clear(); + } + forcible_ = false; + bitField0_ = (bitField0_ & ~0x00000002); + nonceGroup_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + nonce_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = java.util.Collections.unmodifiableList(region_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.region_ = 
region_; + } else { + result.region_ = regionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.forcible_ = forcible_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.nonceGroup_ = nonceGroup_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.nonce_ = nonce_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance()) return this; + if (regionBuilder_ == null) { + if (!other.region_.isEmpty()) { + if (region_.isEmpty()) { + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRegionIsMutable(); + region_.addAll(other.region_); + } + onChanged(); + } + } else { + if (!other.region_.isEmpty()) { + if (regionBuilder_.isEmpty()) { + regionBuilder_.dispose(); + regionBuilder_ = null; + region_ = other.region_; + bitField0_ = (bitField0_ & ~0x00000001); + regionBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getRegionFieldBuilder() : null; + } else { + regionBuilder_.addAllMessages(other.region_); + } + } + } + if (other.hasForcible()) { + setForcible(other.getForcible()); + } + if (other.hasNonceGroup()) { + setNonceGroup(other.getNonceGroup()); + } + if (other.hasNonce()) { + setNonce(other.getNonce()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getRegionCount(); i++) { + if (!getRegion(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List region_ = + java.util.Collections.emptyList(); + private void ensureRegionIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + region_ = new java.util.ArrayList(region_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public java.util.List getRegionList() { + if (regionBuilder_ == null) { + return java.util.Collections.unmodifiableList(region_); + } else { + return regionBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public int getRegionCount() { + if (regionBuilder_ == null) { + return region_.size(); + } else { + return regionBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + if (regionBuilder_ == null) { + return region_.get(index); + } else { + return regionBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.set(index, value); + onChanged(); + } else { + regionBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.set(index, builderForValue.build()); + onChanged(); + } else { + regionBuilder_.setMessage(index, 
builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.add(value); + onChanged(); + } else { + regionBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionIsMutable(); + region_.add(index, value); + onChanged(); + } else { + regionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.add(builderForValue.build()); + onChanged(); + } else { + regionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.add(index, builderForValue.build()); + onChanged(); + } else { + regionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder addAllRegion( + java.lang.Iterable values) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, region_); + onChanged(); + } else { + regionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + regionBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public Builder removeRegion(int index) { + if (regionBuilder_ == null) { + ensureRegionIsMutable(); + region_.remove(index); + onChanged(); + } else { + regionBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder( + int index) { + return getRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + int index) { + if (regionBuilder_ == null) { + return region_.get(index); } else { + return regionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public java.util.List + getRegionOrBuilderList() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(region_); + } + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; 
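+     *
+     * Illustrative sketch (requestBuilder and encodedName are assumed):
+     * the nested builder returned here mutates the repeated field in place,
+     * so no explicit build()/addRegion() round-trip is needed:
+     *
+     *   requestBuilder.addRegionBuilder()
+     *       .setType(RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
+     *       .setValue(ByteString.copyFrom(encodedName));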
+ */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder() { + return getRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder( + int index) { + return getRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionSpecifier region = 1; + */ + public java.util.List + getRegionBuilderList() { + return getRegionFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + region_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + private boolean forcible_ ; + /** + * optional bool forcible = 3 [default = false]; + */ + public boolean hasForcible() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool forcible = 3 [default = false]; + */ + public boolean getForcible() { + return forcible_; + } + /** + * optional bool forcible = 3 [default = false]; + */ + public Builder setForcible(boolean value) { + bitField0_ |= 0x00000002; + forcible_ = value; + onChanged(); + return this; + } + /** + * optional bool forcible = 3 [default = false]; + */ + public Builder clearForcible() { + bitField0_ = (bitField0_ & ~0x00000002); + forcible_ = false; + onChanged(); + return this; + } + + private long nonceGroup_ ; + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + public Builder setNonceGroup(long value) { + bitField0_ |= 0x00000004; + nonceGroup_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce_group = 4 [default = 0]; + */ + public Builder clearNonceGroup() { + bitField0_ = (bitField0_ & ~0x00000004); + nonceGroup_ = 0L; + onChanged(); + return this; + } + + private long nonce_ ; + /** + * optional uint64 nonce = 5 [default = 0]; + */ + public boolean hasNonce() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint64 nonce = 5 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + /** + * optional uint64 nonce = 5 [default = 0]; + */ + public Builder setNonce(long value) { + bitField0_ |= 0x00000008; + nonce_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce = 5 
[default = 0]; + */ + public Builder clearNonce() { + bitField0_ = (bitField0_ & ~0x00000008); + nonce_ = 0L; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.MergeTableRegionsRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MergeTableRegionsRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public MergeTableRegionsRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new MergeTableRegionsRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface MergeTableRegionsResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + } + /** + * Protobuf type {@code hbase.pb.MergeTableRegionsResponse} + */ + public static final class MergeTableRegionsResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsResponse) + MergeTableRegionsResponseOrBuilder { + // Use MergeTableRegionsResponse.newBuilder() to construct. 
+ private MergeTableRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private MergeTableRegionsResponse() { + procId_ = 0L; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MergeTableRegionsResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.Builder.class); + } + + private int bitField0_; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + size += 
unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getProcId()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseDelimitedFrom(java.io.InputStream input) + throws 
java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MergeTableRegionsResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.MergeTableRegionsResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) 
super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return 
super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.MergeTableRegionsResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.MergeTableRegionsResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeTableRegionsResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<MergeTableRegionsResponse>() {
+      public MergeTableRegionsResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new MergeTableRegionsResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeTableRegionsResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeTableRegionsResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
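The hunk above completes the generated MergeTableRegionsResponse, whose only field is the optional proc_id of the master-side merge procedure. A minimal round-trip sketch using only the accessors and parser entry points defined above (the wrapper class and the 42L id are illustrative, not part of the patch):

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;

public class MergeResponseRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a response carrying a hypothetical procedure id.
    MergeTableRegionsResponse resp =
        MergeTableRegionsResponse.newBuilder().setProcId(42L).build();
    // Serialize, then parse back through the generated PARSER.
    MergeTableRegionsResponse parsed =
        MergeTableRegionsResponse.parseFrom(resp.toByteArray());
    // equals()/hashCode() above compare only proc_id and unknown fields,
    // so a round-tripped message is equal to the original.
    System.out.println(parsed.hasProcId() && parsed.getProcId() == 42L
        && parsed.equals(resp));  // prints: true
  }
}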
   public interface AssignRegionRequestOrBuilder extends
       // @@protoc_insertion_point(interface_extends:hbase.pb.AssignRegionRequest)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
@@ -64243,6 +65741,18 @@ public final class MasterProtos {
       /**
        * <pre>
+       ** Master merges the regions
+       * </pre>
+       *
+       * <code>rpc MergeTableRegions(.hbase.pb.MergeTableRegionsRequest) returns (.hbase.pb.MergeTableRegionsResponse);</code>
+       */
+      public abstract void mergeTableRegions(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse> done);
+
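A master-side implementation satisfies this contract by overriding the abstract method and completing the callback with the id of the procedure it scheduled. A sketch of such an override, assuming a hypothetical submitMergeProcedure helper (only the new method is shown; a real implementor must cover the whole Interface):

@Override
public void mergeTableRegions(
    org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request,
    org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<
        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse> done) {
  // Schedule the merge and hand the procedure id back to the caller.
  long procId = submitMergeProcedure(request);  // hypothetical master-side helper
  done.run(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
      .MergeTableRegionsResponse.newBuilder().setProcId(procId).build());
}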
+      /**
+       * <pre>
       ** Assign a region to a server chosen at random.
       * </pre>
* @@ -64920,6 +66430,14 @@ public final class MasterProtos { } @java.lang.Override + public void mergeTableRegions( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.mergeTableRegions(controller, request, done); + } + + @java.lang.Override public void assignRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest request, @@ -65338,98 +66856,100 @@ public final class MasterProtos { case 9: return impl.dispatchMergingRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest)request); case 10: - return impl.assignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest)request); + return impl.mergeTableRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)request); case 11: - return impl.unassignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest)request); + return impl.assignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest)request); case 12: - return impl.offlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest)request); + return impl.unassignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest)request); case 13: - return impl.deleteTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest)request); + return impl.offlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest)request); case 14: - return impl.truncateTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest)request); + return impl.deleteTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest)request); case 15: - return impl.enableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest)request); + return impl.truncateTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest)request); case 16: - return impl.disableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest)request); + return impl.enableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest)request); case 17: - return impl.modifyTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest)request); + return impl.disableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest)request); case 18: - return impl.createTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest)request); + return impl.modifyTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest)request); case 19: - return impl.shutdown(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest)request); + return impl.createTable(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest)request); case 20: - return impl.stopMaster(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest)request); + return impl.shutdown(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest)request); case 21: - return impl.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request); + return impl.stopMaster(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest)request); case 22: - return impl.balance(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest)request); + return impl.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request); case 23: - return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request); + return impl.balance(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest)request); case 24: - return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request); + return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request); case 25: - return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request); + return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request); case 26: - return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request); + return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request); case 27: - return impl.normalize(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest)request); + return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request); case 28: - return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); + return impl.normalize(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest)request); case 29: - return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); + return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); case 30: - return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); + return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); case 31: - return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); + return impl.runCatalogScan(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); case 32: - return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); + return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); case 33: - return impl.execMasterService(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); + return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); case 34: - return impl.snapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest)request); + return impl.execMasterService(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); case 35: - return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); + return impl.snapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest)request); case 36: - return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); + return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); case 37: - return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); + return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); case 38: - return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); + return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); case 39: - return impl.execProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); case 40: - return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.execProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 41: - return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); + return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 42: - return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); + return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); case 43: - return impl.createNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); + return impl.modifyNamespace(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); case 44: - return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); + return impl.createNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); case 45: - return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); + return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); case 46: - return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); + return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); case 47: - return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); + return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); case 48: - return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); + return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); case 49: - return impl.getTableState(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest)request); + return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); case 50: - return impl.setQuota(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest)request); + return impl.getTableState(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest)request); case 51: - return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); + return impl.setQuota(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest)request); case 52: - return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); + return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); case 53: - return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); + return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); case 54: - return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); + return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); case 
55: - return impl.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request); + return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); case 56: + return impl.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request); + case 57: return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -65466,98 +66986,100 @@ public final class MasterProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest.getDefaultInstance(); case 20: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); case 21: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 35: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 55: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); case 56: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + case 57: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -65594,98 +67116,100 @@ public final class MasterProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(); case 20: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); case 21: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); case 30: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 52: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 55: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); case 56: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + case 57: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -65817,6 +67341,18 @@ public final class MasterProtos { /** *
+     ** Master merges the regions
+     * </pre>
+     *
+     * <code>rpc MergeTableRegions(.hbase.pb.MergeTableRegionsRequest) returns (.hbase.pb.MergeTableRegionsResponse);</code>
+     */
+    public abstract void mergeTableRegions(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse> done);
+
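Callers reach the same signature through the generated client stub, which routes the call via callMethod below. A hedged sketch of an asynchronous invocation (newStub over an RpcChannel follows the standard protobuf service pattern rather than anything this patch adds; request construction is elided since MergeTableRegionsRequest is defined earlier in the patch):

// channel: an org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcChannel to the master;
// request: a MasterProtos.MergeTableRegionsRequest built elsewhere.
MasterProtos.MasterService.Stub stub = MasterProtos.MasterService.newStub(channel);
stub.mergeTableRegions(null /* controller */, request,
    new org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<
        MasterProtos.MergeTableRegionsResponse>() {
      public void run(MasterProtos.MergeTableRegionsResponse response) {
        // The master answers with the id of the procedure executing the merge.
        long procId = response.hasProcId() ? response.getProcId() : -1L;
        System.out.println("Merge submitted as procedure " + procId);
      }
    });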
+    /**
+     * <pre>
      ** Assign a region to a server chosen at random.
      * </pre>
* @@ -66481,236 +68017,241 @@ public final class MasterProtos { done)); return; case 10: + this.mergeTableRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 11: this.assignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 11: + case 12: this.unassignRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 12: + case 13: this.offlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 13: + case 14: this.deleteTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 14: + case 15: this.truncateTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 15: + case 16: this.enableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 16: + case 17: this.disableTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 17: + case 18: this.modifyTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 18: + case 19: this.createTable(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 19: + case 20: this.shutdown(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 20: + case 21: this.stopMaster(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 21: + case 22: this.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 22: + case 23: this.balance(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 23: + case 24: this.setBalancerRunning(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 24: + case 25: this.isBalancerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 25: + case 26: this.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 26: + case 27: this.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 27: + case 28: this.normalize(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 28: + case 29: this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 29: + case 30: this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 30: + case 31: this.runCatalogScan(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 31: + case 32: this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 32: + case 33: this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 33: + case 34: this.execMasterService(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 34: + case 35: this.snapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 35: + case 36: this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 36: + case 37: this.deleteSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 37: + case 38: this.isSnapshotDone(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 38: + case 39: this.restoreSnapshot(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 39: + case 40: this.execProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 40: + case 41: this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 41: + case 42: this.isProcedureDone(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 42: + case 43: this.modifyNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 43: + case 44: this.createNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 44: + case 45: this.deleteNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 45: + case 46: this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 46: + case 47: this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 47: + case 48: this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 48: + case 49: this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 49: + case 50: this.getTableState(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 50: + case 51: this.setQuota(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 51: + case 52: this.getLastMajorCompactionTimestamp(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 52: + case 53: this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 53: + case 54: this.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 54: + case 55: this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 55: + case 56: this.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 56: + case 57: this.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -66750,98 +68291,100 @@ public final class MasterProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest.getDefaultInstance(); case 20: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); case 21: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 32: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 47: 
- return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 55: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); case 56: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + case 57: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -66878,98 +68421,100 @@ public final class MasterProtos { case 9: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); case 13: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(); case 20: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); case 21: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 40: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 42: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 52: return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 55: - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); case 56: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + case 57: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -67142,12 +68687,27 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance())); } + public void mergeTableRegions( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(10), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance())); + } + public void assignRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(), @@ -67162,7 +68722,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(), @@ -67177,7 +68737,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance(), @@ -67192,7 +68752,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance(), @@ -67207,7 +68767,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(), @@ -67222,7 +68782,7 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance(), @@ -67237,7 +68797,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance(), @@ -67252,7 +68812,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance(), @@ -67267,7 +68827,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance(), @@ -67282,7 +68842,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(), @@ -67297,7 +68857,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(20), + getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(), @@ -67312,7 +68872,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(21), + getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(), @@ -67327,7 +68887,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(22), + getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(), @@ -67342,7 +68902,7 @@ public final class 
MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(), @@ -67357,7 +68917,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(), @@ -67372,7 +68932,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(), @@ -67387,7 +68947,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(), @@ -67402,7 +68962,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(), @@ -67417,7 +68977,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(), @@ -67432,7 +68992,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(), @@ -67447,7 +69007,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(31), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(), @@ -67462,7 +69022,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(), @@ -67477,7 +69037,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), @@ -67492,7 +69052,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(), @@ -67507,7 +69067,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(), @@ -67522,7 +69082,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(), @@ -67537,7 +69097,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(), @@ -67552,7 +69112,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(), @@ -67567,7 +69127,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { 
channel.callMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(), @@ -67582,7 +69142,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -67597,7 +69157,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -67612,7 +69172,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(), @@ -67627,7 +69187,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(), @@ -67642,7 +69202,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(), @@ -67657,7 +69217,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(), @@ -67672,7 +69232,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(), @@ -67687,7 +69247,7 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(), @@ -67702,7 +69262,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(), @@ -67717,7 +69277,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(), @@ -67732,7 +69292,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), @@ -67747,7 +69307,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(), @@ -67762,7 +69322,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -67777,7 +69337,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -67792,7 +69352,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(54), 
controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), @@ -67807,7 +69367,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(), @@ -67822,7 +69382,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(), @@ -67837,7 +69397,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(56), + getDescriptor().getMethods().get(57), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(), @@ -67904,6 +69464,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse mergeTableRegions( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest request) @@ -68267,12 +69832,24 @@ public final class MasterProtos { } + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse mergeTableRegions( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(10), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance()); @@ -68284,7 +69861,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance()); @@ -68296,7 +69873,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse.getDefaultInstance()); @@ -68308,7 +69885,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance()); @@ -68320,7 +69897,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance()); @@ -68332,7 +69909,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance()); @@ -68344,7 +69921,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, 
request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance()); @@ -68356,7 +69933,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse.getDefaultInstance()); @@ -68368,7 +69945,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance()); @@ -68380,7 +69957,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance()); @@ -68392,7 +69969,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(20), + getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()); @@ -68404,7 +69981,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(21), + getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()); @@ -68416,7 +69993,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(22), + getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()); @@ -68428,7 +70005,7 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance()); @@ -68440,7 +70017,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance()); @@ -68452,7 +70029,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance()); @@ -68464,7 +70041,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance()); @@ -68476,7 +70053,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance()); @@ -68488,7 +70065,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance()); @@ -68500,7 +70077,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance()); @@ -68512,7 +70089,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); @@ -68524,7 +70101,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); @@ -68536,7 +70113,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); @@ -68548,7 +70125,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); @@ -68560,7 +70137,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); @@ -68572,7 +70149,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); @@ -68584,7 +70161,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); @@ -68596,7 +70173,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); @@ -68608,7 +70185,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); @@ -68620,7 +70197,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -68632,7 +70209,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -68644,7 +70221,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), + 
getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); @@ -68656,7 +70233,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); @@ -68668,7 +70245,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); @@ -68680,7 +70257,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); @@ -68692,7 +70269,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); @@ -68704,7 +70281,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); @@ -68716,7 +70293,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(48), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); @@ -68728,7 +70305,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); @@ -68740,7 +70317,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); @@ -68752,7 +70329,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); @@ -68764,7 +70341,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -68776,7 +70353,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -68788,7 +70365,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(54), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); @@ -68800,7 +70377,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); @@ -68812,7 +70389,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); @@ -68824,7 +70401,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(56), + getDescriptor().getMethods().get(57), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); @@ -68886,6 +70463,16 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_DispatchMergingRegionsResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MergeTableRegionsRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MergeTableRegionsResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_AssignRegionRequest_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -69425,310 +71012,317 @@ public final class MasterProtos { "(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcibl" + "e\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020" + "\n\005nonce\030\005 \001(\004:\0010\"1\n\036DispatchMergingRegio" + - "nsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023AssignReg" + - "ionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + - "gionSpecifier\"\026\n\024AssignRegionResponse\"X\n" + - "\025UnassignRegionRequest\022)\n\006region\030\001 \002(\0132\031", - 
".hbase.pb.RegionSpecifier\022\024\n\005force\030\002 \001(\010" + - ":\005false\"\030\n\026UnassignRegionResponse\"A\n\024Off" + - "lineRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbas" + - "e.pb.RegionSpecifier\"\027\n\025OfflineRegionRes" + - "ponse\"\177\n\022CreateTableRequest\022+\n\014table_sch" + - "ema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\nspli" + - "t_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005" + - "nonce\030\004 \001(\004:\0010\"&\n\023CreateTableResponse\022\017\n" + - "\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest\022\'\n\n" + - "table_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n", - "\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"" + - "&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" + - "\207\001\n\024TruncateTableRequest\022&\n\ttableName\030\001 " + - "\002(\0132\023.hbase.pb.TableName\022\035\n\016preserveSpli" + - "ts\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004:\0010\022" + - "\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableRespon" + - "se\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableReques" + - "t\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNa" + - "me\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(" + - "\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_id\030\001", - " \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable_nam" + - "e\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_gr" + - "oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Disabl" + - "eTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022Modi" + - "fyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hba" + - "se.pb.TableName\022+\n\014table_schema\030\002 \002(\0132\025." + - "hbase.pb.TableSchema\022\026\n\013nonce_group\030\003 \001(" + - "\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTableRes" + - "ponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateNamespac" + - "eRequest\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.", - "hbase.pb.NamespaceDescriptor\022\026\n\013nonce_gr" + - "oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Create" + - "NamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026De" + - "leteNamespaceRequest\022\025\n\rnamespaceName\030\001 " + - "\002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001" + - "(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007proc" + - "_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022:\n\023n" + - "amespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Name" + - "spaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022" + - "\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespaceResp", - "onse\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespaceDes" + - "criptorRequest\022\025\n\rnamespaceName\030\001 \002(\t\"\\\n" + - "\036GetNamespaceDescriptorResponse\022:\n\023names" + - "paceDescriptor\030\001 \002(\0132\035.hbase.pb.Namespac" + - "eDescriptor\"!\n\037ListNamespaceDescriptorsR" + - "equest\"^\n ListNamespaceDescriptorsRespon" + - "se\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hbase." 
+ - "pb.NamespaceDescriptor\"?\n&ListTableDescr" + - "iptorsByNamespaceRequest\022\025\n\rnamespaceNam" + - "e\030\001 \002(\t\"U\n\'ListTableDescriptorsByNamespa", - "ceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hbase." + - "pb.TableSchema\"9\n ListTableNamesByNamesp" + - "aceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n!Lis" + - "tTableNamesByNamespaceResponse\022&\n\ttableN" + - "ame\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Shutdo" + - "wnRequest\"\022\n\020ShutdownResponse\"\023\n\021StopMas" + - "terRequest\"\024\n\022StopMasterResponse\"\034\n\032IsIn" + - "MaintenanceModeRequest\"8\n\033IsInMaintenanc" + - "eModeResponse\022\031\n\021inMaintenanceMode\030\001 \002(\010" + - "\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n\017Bal", - "anceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031Se" + - "tBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013s" + - "ynchronous\030\002 \001(\010\"8\n\032SetBalancerRunningRe" + - "sponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\032\n\030Is" + - "BalancerEnabledRequest\",\n\031IsBalancerEnab" + - "ledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetSplit" + - "OrMergeEnabledRequest\022\017\n\007enabled\030\001 \002(\010\022\023" + - "\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030\003 \003(" + - "\0162\032.hbase.pb.MasterSwitchType\"4\n\036SetSpli" + - "tOrMergeEnabledResponse\022\022\n\nprev_value\030\001 ", - "\003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022/\n\013s" + - "witch_type\030\001 \002(\0162\032.hbase.pb.MasterSwitch" + - "Type\"0\n\035IsSplitOrMergeEnabledResponse\022\017\n" + - "\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+\n\021No" + - "rmalizeResponse\022\026\n\016normalizer_ran\030\001 \002(\010\"" + - ")\n\033SetNormalizerRunningRequest\022\n\n\002on\030\001 \002" + - "(\010\"=\n\034SetNormalizerRunningResponse\022\035\n\025pr" + - "ev_normalizer_value\030\001 \001(\010\"\034\n\032IsNormalize" + - "rEnabledRequest\".\n\033IsNormalizerEnabledRe" + - "sponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogSca", - "nRequest\"-\n\026RunCatalogScanResponse\022\023\n\013sc" + - "an_result\030\001 \001(\005\"-\n\033EnableCatalogJanitorR" + - "equest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJ" + - "anitorResponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036Is" + - "CatalogJanitorEnabledRequest\"0\n\037IsCatalo" + - "gJanitorEnabledResponse\022\r\n\005value\030\001 \002(\010\"B" + - "\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hb" + - "ase.pb.SnapshotDescription\",\n\020SnapshotRe" + - "sponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034GetC" + - "ompletedSnapshotsRequest\"Q\n\035GetCompleted", - "SnapshotsResponse\0220\n\tsnapshots\030\001 \003(\0132\035.h" + - "base.pb.SnapshotDescription\"H\n\025DeleteSna" + - "pshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.p" + - "b.SnapshotDescription\"\030\n\026DeleteSnapshotR" + - "esponse\"s\n\026RestoreSnapshotRequest\022/\n\010sna" + - "pshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescripti" + - "on\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(" + - "\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007proc_" + - "id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n\010sna" + - "pshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescripti", - "on\"^\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001" + - 
"(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.S" + - "napshotDescription\"O\n\034IsRestoreSnapshotD" + - "oneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb." + - "SnapshotDescription\"4\n\035IsRestoreSnapshot" + - "DoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033Get" + - "SchemaAlterStatusRequest\022\'\n\ntable_name\030\001" + - " \002(\0132\023.hbase.pb.TableName\"T\n\034GetSchemaAl" + - "terStatusResponse\022\035\n\025yet_to_update_regio" + - "ns\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetT", - "ableDescriptorsRequest\022(\n\013table_names\030\001 " + - "\003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022" + - "!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021\n\tna" + - "mespace\030\004 \001(\t\"J\n\033GetTableDescriptorsResp" + - "onse\022+\n\014table_schema\030\001 \003(\0132\025.hbase.pb.Ta" + - "bleSchema\"[\n\024GetTableNamesRequest\022\r\n\005reg" + - "ex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005fa" + - "lse\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesR" + - "esponse\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." + - "TableName\"?\n\024GetTableStateRequest\022\'\n\ntab", - "le_name\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025Ge" + - "tTableStateResponse\022)\n\013table_state\030\001 \002(\013" + - "2\024.hbase.pb.TableState\"\031\n\027GetClusterStat" + - "usRequest\"K\n\030GetClusterStatusResponse\022/\n" + - "\016cluster_status\030\001 \002(\0132\027.hbase.pb.Cluster" + - "Status\"\030\n\026IsMasterRunningRequest\"4\n\027IsMa" + - "sterRunningResponse\022\031\n\021is_master_running" + - "\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tproced" + - "ure\030\001 \002(\0132\036.hbase.pb.ProcedureDescriptio" + - "n\"F\n\025ExecProcedureResponse\022\030\n\020expected_t", - "imeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsP" + - "rocedureDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036" + - ".hbase.pb.ProcedureDescription\"`\n\027IsProc" + - "edureDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220" + - "\n\010snapshot\030\002 \001(\0132\036.hbase.pb.ProcedureDes" + - "cription\",\n\031GetProcedureResultRequest\022\017\n" + - "\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultRes" + - "ponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetProce" + - "dureResultResponse.State\022\022\n\nstart_time\030\002" + - " \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(", - "\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.ForeignE" + - "xceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022" + - "\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProce" + - "dureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInter" + - "ruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProced" + - "ureResponse\022\034\n\024is_procedure_aborted\030\001 \002(" + - "\010\"\027\n\025ListProceduresRequest\"@\n\026ListProced" + - "uresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase." 
+ - "pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser" + - "_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnames", - "pace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.p" + - "b.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypas" + - "s_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbas" + - "e.pb.ThrottleRequest\"\022\n\020SetQuotaResponse" + - "\"J\n\037MajorCompactionTimestampRequest\022\'\n\nt" + - "able_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(" + - "MajorCompactionTimestampForRegionRequest" + - "\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecif" + - "ier\"@\n MajorCompactionTimestampResponse\022" + - "\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Securit", - "yCapabilitiesRequest\"\354\001\n\034SecurityCapabil" + - "itiesResponse\022G\n\014capabilities\030\001 \003(\01621.hb" + - "ase.pb.SecurityCapabilitiesResponse.Capa" + - "bility\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTI" + - "CATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rA" + - "UTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023" + - "\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchType\022" + - "\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022" + - "e\n\024GetSchemaAlterStatus\022%.hbase.pb.GetSc" + - "hemaAlterStatusRequest\032&.hbase.pb.GetSch", - "emaAlterStatusResponse\022b\n\023GetTableDescri" + - "ptors\022$.hbase.pb.GetTableDescriptorsRequ" + - "est\032%.hbase.pb.GetTableDescriptorsRespon" + - "se\022P\n\rGetTableNames\022\036.hbase.pb.GetTableN" + - "amesRequest\032\037.hbase.pb.GetTableNamesResp" + - "onse\022Y\n\020GetClusterStatus\022!.hbase.pb.GetC" + - "lusterStatusRequest\032\".hbase.pb.GetCluste" + - "rStatusResponse\022V\n\017IsMasterRunning\022 .hba" + - "se.pb.IsMasterRunningRequest\032!.hbase.pb." + - "IsMasterRunningResponse\022D\n\tAddColumn\022\032.h", - "base.pb.AddColumnRequest\032\033.hbase.pb.AddC" + - "olumnResponse\022M\n\014DeleteColumn\022\035.hbase.pb" + - ".DeleteColumnRequest\032\036.hbase.pb.DeleteCo" + - "lumnResponse\022M\n\014ModifyColumn\022\035.hbase.pb." + - "ModifyColumnRequest\032\036.hbase.pb.ModifyCol" + - "umnResponse\022G\n\nMoveRegion\022\033.hbase.pb.Mov" + - "eRegionRequest\032\034.hbase.pb.MoveRegionResp" + - "onse\022k\n\026DispatchMergingRegions\022\'.hbase.p" + - "b.DispatchMergingRegionsRequest\032(.hbase." + - "pb.DispatchMergingRegionsResponse\022M\n\014Ass", - "ignRegion\022\035.hbase.pb.AssignRegionRequest" + - "\032\036.hbase.pb.AssignRegionResponse\022S\n\016Unas" + - "signRegion\022\037.hbase.pb.UnassignRegionRequ" + - "est\032 .hbase.pb.UnassignRegionResponse\022P\n" + - "\rOfflineRegion\022\036.hbase.pb.OfflineRegionR" + - "equest\032\037.hbase.pb.OfflineRegionResponse\022" + - "J\n\013DeleteTable\022\034.hbase.pb.DeleteTableReq" + - "uest\032\035.hbase.pb.DeleteTableResponse\022P\n\rt" + - "runcateTable\022\036.hbase.pb.TruncateTableReq" + - "uest\032\037.hbase.pb.TruncateTableResponse\022J\n", - "\013EnableTable\022\034.hbase.pb.EnableTableReque" + - "st\032\035.hbase.pb.EnableTableResponse\022M\n\014Dis" + - "ableTable\022\035.hbase.pb.DisableTableRequest" + - "\032\036.hbase.pb.DisableTableResponse\022J\n\013Modi" + - "fyTable\022\034.hbase.pb.ModifyTableRequest\032\035." 
+ - "hbase.pb.ModifyTableResponse\022J\n\013CreateTa" + - "ble\022\034.hbase.pb.CreateTableRequest\032\035.hbas" + - "e.pb.CreateTableResponse\022A\n\010Shutdown\022\031.h" + - "base.pb.ShutdownRequest\032\032.hbase.pb.Shutd" + - "ownResponse\022G\n\nStopMaster\022\033.hbase.pb.Sto", - "pMasterRequest\032\034.hbase.pb.StopMasterResp" + - "onse\022h\n\031IsMasterInMaintenanceMode\022$.hbas" + - "e.pb.IsInMaintenanceModeRequest\032%.hbase." + - "pb.IsInMaintenanceModeResponse\022>\n\007Balanc" + - "e\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.B" + - "alanceResponse\022_\n\022SetBalancerRunning\022#.h" + - "base.pb.SetBalancerRunningRequest\032$.hbas" + - "e.pb.SetBalancerRunningResponse\022\\\n\021IsBal" + - "ancerEnabled\022\".hbase.pb.IsBalancerEnable" + - "dRequest\032#.hbase.pb.IsBalancerEnabledRes", - "ponse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase." + - "pb.SetSplitOrMergeEnabledRequest\032(.hbase" + - ".pb.SetSplitOrMergeEnabledResponse\022h\n\025Is" + - "SplitOrMergeEnabled\022&.hbase.pb.IsSplitOr" + - "MergeEnabledRequest\032\'.hbase.pb.IsSplitOr" + - "MergeEnabledResponse\022D\n\tNormalize\022\032.hbas" + - "e.pb.NormalizeRequest\032\033.hbase.pb.Normali" + - "zeResponse\022e\n\024SetNormalizerRunning\022%.hba" + - "se.pb.SetNormalizerRunningRequest\032&.hbas" + - "e.pb.SetNormalizerRunningResponse\022b\n\023IsN", - "ormalizerEnabled\022$.hbase.pb.IsNormalizer" + - "EnabledRequest\032%.hbase.pb.IsNormalizerEn" + - "abledResponse\022S\n\016RunCatalogScan\022\037.hbase." + - "pb.RunCatalogScanRequest\032 .hbase.pb.RunC" + - "atalogScanResponse\022e\n\024EnableCatalogJanit" + - "or\022%.hbase.pb.EnableCatalogJanitorReques" + - "t\032&.hbase.pb.EnableCatalogJanitorRespons" + - "e\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb." + - "IsCatalogJanitorEnabledRequest\032).hbase.p" + - "b.IsCatalogJanitorEnabledResponse\022^\n\021Exe", - "cMasterService\022#.hbase.pb.CoprocessorSer" + - "viceRequest\032$.hbase.pb.CoprocessorServic" + - "eResponse\022A\n\010Snapshot\022\031.hbase.pb.Snapsho" + - "tRequest\032\032.hbase.pb.SnapshotResponse\022h\n\025" + - "GetCompletedSnapshots\022&.hbase.pb.GetComp" + - "letedSnapshotsRequest\032\'.hbase.pb.GetComp" + - "letedSnapshotsResponse\022S\n\016DeleteSnapshot" + - "\022\037.hbase.pb.DeleteSnapshotRequest\032 .hbas" + - "e.pb.DeleteSnapshotResponse\022S\n\016IsSnapsho" + - "tDone\022\037.hbase.pb.IsSnapshotDoneRequest\032 ", - ".hbase.pb.IsSnapshotDoneResponse\022V\n\017Rest" + - "oreSnapshot\022 .hbase.pb.RestoreSnapshotRe" + - "quest\032!.hbase.pb.RestoreSnapshotResponse" + - "\022P\n\rExecProcedure\022\036.hbase.pb.ExecProcedu" + - "reRequest\032\037.hbase.pb.ExecProcedureRespon" + - "se\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Ex" + - "ecProcedureRequest\032\037.hbase.pb.ExecProced" + - "ureResponse\022V\n\017IsProcedureDone\022 .hbase.p" + - "b.IsProcedureDoneRequest\032!.hbase.pb.IsPr" + - "ocedureDoneResponse\022V\n\017ModifyNamespace\022 ", - ".hbase.pb.ModifyNamespaceRequest\032!.hbase" + - ".pb.ModifyNamespaceResponse\022V\n\017CreateNam" + - "espace\022 .hbase.pb.CreateNamespaceRequest" + - "\032!.hbase.pb.CreateNamespaceResponse\022V\n\017D" + - "eleteNamespace\022 .hbase.pb.DeleteNamespac" + - "eRequest\032!.hbase.pb.DeleteNamespaceRespo" + - "nse\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb" + - ".GetNamespaceDescriptorRequest\032(.hbase.p" + - "b.GetNamespaceDescriptorResponse\022q\n\030List" + - "NamespaceDescriptors\022).hbase.pb.ListName", - "spaceDescriptorsRequest\032*.hbase.pb.ListN" + - 
"amespaceDescriptorsResponse\022\206\001\n\037ListTabl" + - "eDescriptorsByNamespace\0220.hbase.pb.ListT" + - "ableDescriptorsByNamespaceRequest\0321.hbas" + - "e.pb.ListTableDescriptorsByNamespaceResp" + - "onse\022t\n\031ListTableNamesByNamespace\022*.hbas" + - "e.pb.ListTableNamesByNamespaceRequest\032+." + - "hbase.pb.ListTableNamesByNamespaceRespon" + - "se\022P\n\rGetTableState\022\036.hbase.pb.GetTableS" + - "tateRequest\032\037.hbase.pb.GetTableStateResp", - "onse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaRequ" + - "est\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLa" + - "stMajorCompactionTimestamp\022).hbase.pb.Ma" + - "jorCompactionTimestampRequest\032*.hbase.pb" + - ".MajorCompactionTimestampResponse\022\212\001\n(ge" + - "tLastMajorCompactionTimestampForRegion\0222" + - ".hbase.pb.MajorCompactionTimestampForReg" + - "ionRequest\032*.hbase.pb.MajorCompactionTim" + - "estampResponse\022_\n\022getProcedureResult\022#.h" + - "base.pb.GetProcedureResultRequest\032$.hbas", - "e.pb.GetProcedureResultResponse\022h\n\027getSe" + - "curityCapabilities\022%.hbase.pb.SecurityCa" + - "pabilitiesRequest\032&.hbase.pb.SecurityCap" + - "abilitiesResponse\022S\n\016AbortProcedure\022\037.hb" + - "ase.pb.AbortProcedureRequest\032 .hbase.pb." + - "AbortProcedureResponse\022S\n\016ListProcedures" + - "\022\037.hbase.pb.ListProceduresRequest\032 .hbas" + - "e.pb.ListProceduresResponseBI\n1org.apach" + - "e.hadoop.hbase.shaded.protobuf.generated" + - "B\014MasterProtosH\001\210\001\001\240\001\001" + "nsResponse\022\017\n\007proc_id\030\001 \001(\004\"\210\001\n\030MergeTab" + + "leRegionsRequest\022)\n\006region\030\001 \003(\0132\031.hbase" + + ".pb.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005f" + + "alse\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 ", + "\001(\004:\0010\",\n\031MergeTableRegionsResponse\022\017\n\007p" + + "roc_id\030\001 \001(\004\"@\n\023AssignRegionRequest\022)\n\006r" + + "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\026" + + "\n\024AssignRegionResponse\"X\n\025UnassignRegion" + + "Request\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" + + "nSpecifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unas" + + "signRegionResponse\"A\n\024OfflineRegionReque" + + "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" + + "ifier\"\027\n\025OfflineRegionResponse\"\177\n\022Create" + + "TableRequest\022+\n\014table_schema\030\001 \002(\0132\025.hba", + "se.pb.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n" + + "\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"" + + "&\n\023CreateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" + + "g\n\022DeleteTableRequest\022\'\n\ntable_name\030\001 \002(" + + "\0132\023.hbase.pb.TableName\022\026\n\013nonce_group\030\002 " + + "\001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023DeleteTableR" + + "esponse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024TruncateTab" + + "leRequest\022&\n\ttableName\030\001 \002(\0132\023.hbase.pb." 
+ + "TableName\022\035\n\016preserveSplits\030\002 \001(\010:\005false" + + "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:", + "\0010\"(\n\025TruncateTableResponse\022\017\n\007proc_id\030\001" + + " \001(\004\"g\n\022EnableTableRequest\022\'\n\ntable_name" + + "\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_gro" + + "up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableT" + + "ableResponse\022\017\n\007proc_id\030\001 \001(\004\"h\n\023Disable" + + "TableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase" + + ".pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" + + "\005nonce\030\003 \001(\004:\0010\"\'\n\024DisableTableResponse\022" + + "\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022ModifyTableRequest\022" + + "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName", + "\022+\n\014table_schema\030\002 \002(\0132\025.hbase.pb.TableS" + + "chema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" + + " \001(\004:\0010\"&\n\023ModifyTableResponse\022\017\n\007proc_i" + + "d\030\001 \001(\004\"~\n\026CreateNamespaceRequest\022:\n\023nam" + + "espaceDescriptor\030\001 \002(\0132\035.hbase.pb.Namesp" + + "aceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" + + "\005nonce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceRespon" + + "se\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceRe" + + "quest\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_gr" + + "oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Delete", + "NamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Mo" + + "difyNamespaceRequest\022:\n\023namespaceDescrip" + + "tor\030\001 \002(\0132\035.hbase.pb.NamespaceDescriptor" + + "\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:" + + "\0010\"*\n\027ModifyNamespaceResponse\022\017\n\007proc_id" + + "\030\001 \001(\004\"6\n\035GetNamespaceDescriptorRequest\022" + + "\025\n\rnamespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDe" + + "scriptorResponse\022:\n\023namespaceDescriptor\030" + + "\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037" + + "ListNamespaceDescriptorsRequest\"^\n ListN", + "amespaceDescriptorsResponse\022:\n\023namespace" + + "Descriptor\030\001 \003(\0132\035.hbase.pb.NamespaceDes" + + "criptor\"?\n&ListTableDescriptorsByNamespa" + + "ceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'List" + + "TableDescriptorsByNamespaceResponse\022*\n\013t" + + "ableSchema\030\001 \003(\0132\025.hbase.pb.TableSchema\"" + + "9\n ListTableNamesByNamespaceRequest\022\025\n\rn" + + "amespaceName\030\001 \002(\t\"K\n!ListTableNamesByNa" + + "mespaceResponse\022&\n\ttableName\030\001 \003(\0132\023.hba" + + "se.pb.TableName\"\021\n\017ShutdownRequest\"\022\n\020Sh", + "utdownResponse\"\023\n\021StopMasterRequest\"\024\n\022S" + + "topMasterResponse\"\034\n\032IsInMaintenanceMode" + + "Request\"8\n\033IsInMaintenanceModeResponse\022\031" + + "\n\021inMaintenanceMode\030\001 \002(\010\"\037\n\016BalanceRequ" + + "est\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n" + + "\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunnin" + + "gRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(" + + "\010\"8\n\032SetBalancerRunningResponse\022\032\n\022prev_" + + "balance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabled" + + 
"Request\",\n\031IsBalancerEnabledResponse\022\017\n\007", + "enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeEnabledR" + + "equest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002" + + " \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb.Ma" + + "sterSwitchType\"4\n\036SetSplitOrMergeEnabled" + + "Response\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSplitO" + + "rMergeEnabledRequest\022/\n\013switch_type\030\001 \002(" + + "\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSplit" + + "OrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"" + + "\022\n\020NormalizeRequest\"+\n\021NormalizeResponse" + + "\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormalize", + "rRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormal" + + "izerRunningResponse\022\035\n\025prev_normalizer_v" + + "alue\030\001 \001(\010\"\034\n\032IsNormalizerEnabledRequest" + + "\".\n\033IsNormalizerEnabledResponse\022\017\n\007enabl" + + "ed\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026Run" + + "CatalogScanResponse\022\023\n\013scan_result\030\001 \001(\005" + + "\"-\n\033EnableCatalogJanitorRequest\022\016\n\006enabl" + + "e\030\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022" + + "\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorE" + + "nabledRequest\"0\n\037IsCatalogJanitorEnabled", + "Response\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReque" + + "st\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapshot" + + "Description\",\n\020SnapshotResponse\022\030\n\020expec" + + "ted_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapsho" + + "tsRequest\"Q\n\035GetCompletedSnapshotsRespon" + + "se\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snapsho" + + "tDescription\"H\n\025DeleteSnapshotRequest\022/\n" + + "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" + + "iption\"\030\n\026DeleteSnapshotResponse\"s\n\026Rest" + + "oreSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.h", + "base.pb.SnapshotDescription\022\026\n\013nonce_gro" + + "up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Restore" + + "SnapshotResponse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsS" + + "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" + + "base.pb.SnapshotDescription\"^\n\026IsSnapsho" + + "tDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010sn" + + "apshot\030\002 \001(\0132\035.hbase.pb.SnapshotDescript" + + "ion\"O\n\034IsRestoreSnapshotDoneRequest\022/\n\010s" + + "napshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescrip" + + "tion\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n", + "\004done\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStat" + + "usRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" + + ".TableName\"T\n\034GetSchemaAlterStatusRespon" + + "se\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtot" + + "al_regions\030\002 \001(\r\"\213\001\n\032GetTableDescriptors" + + "Request\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." 
+ + "TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_" + + "tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J" + + "\n\033GetTableDescriptorsResponse\022+\n\014table_s" + + "chema\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024Ge", + "tTableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022inc" + + "lude_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespac" + + "e\030\003 \001(\t\"A\n\025GetTableNamesResponse\022(\n\013tabl" + + "e_names\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024Ge" + + "tTableStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023" + + ".hbase.pb.TableName\"B\n\025GetTableStateResp" + + "onse\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.Tab" + + "leState\"\031\n\027GetClusterStatusRequest\"K\n\030Ge" + + "tClusterStatusResponse\022/\n\016cluster_status" + + "\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMas", + "terRunningRequest\"4\n\027IsMasterRunningResp" + + "onse\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecP" + + "rocedureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hba" + + "se.pb.ProcedureDescription\"F\n\025ExecProced" + + "ureResponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n" + + "\013return_data\030\002 \001(\014\"K\n\026IsProcedureDoneReq" + + "uest\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Proce" + + "dureDescription\"`\n\027IsProcedureDoneRespon" + + "se\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(" + + "\0132\036.hbase.pb.ProcedureDescription\",\n\031Get", + "ProcedureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"" + + "\371\001\n\032GetProcedureResultResponse\0229\n\005state\030" + + "\001 \002(\0162*.hbase.pb.GetProcedureResultRespo" + + "nse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_up" + + "date\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030" + + "\005 \001(\0132!.hbase.pb.ForeignExceptionMessage" + + "\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n" + + "\010FINISHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007" + + "proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002" + + " \001(\010:\004true\"6\n\026AbortProcedureResponse\022\034\n\024", + "is_procedure_aborted\030\001 \002(\010\"\027\n\025ListProced" + + "uresRequest\"@\n\026ListProceduresResponse\022&\n" + + "\tprocedure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001" + + "\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\n" + + "user_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\nt" + + "able_name\030\004 \001(\0132\023.hbase.pb.TableName\022\022\n\n" + + "remove_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010" + + "\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.ThrottleRe" + + "quest\"\022\n\020SetQuotaResponse\"J\n\037MajorCompac" + + "tionTimestampRequest\022\'\n\ntable_name\030\001 \002(\013", + "2\023.hbase.pb.TableName\"U\n(MajorCompaction" + + "TimestampForRegionRequest\022)\n\006region\030\001 \002(" + + "\0132\031.hbase.pb.RegionSpecifier\"@\n MajorCom" + + "pactionTimestampResponse\022\034\n\024compaction_t" + + "imestamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRe" + + "quest\"\354\001\n\034SecurityCapabilitiesResponse\022G" + + "\n\014capabilities\030\001 \003(\01621.hbase.pb.Security" + + "CapabilitiesResponse.Capability\"\202\001\n\nCapa" + + "bility\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SEC" + + 
"URE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022", + "\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILI" + + "TY\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005M" + + "ERGE\020\0012\261)\n\rMasterService\022e\n\024GetSchemaAlt" + + "erStatus\022%.hbase.pb.GetSchemaAlterStatus" + + "Request\032&.hbase.pb.GetSchemaAlterStatusR" + + "esponse\022b\n\023GetTableDescriptors\022$.hbase.p" + + "b.GetTableDescriptorsRequest\032%.hbase.pb." + + "GetTableDescriptorsResponse\022P\n\rGetTableN" + + "ames\022\036.hbase.pb.GetTableNamesRequest\032\037.h" + + "base.pb.GetTableNamesResponse\022Y\n\020GetClus", + "terStatus\022!.hbase.pb.GetClusterStatusReq" + + "uest\032\".hbase.pb.GetClusterStatusResponse" + + "\022V\n\017IsMasterRunning\022 .hbase.pb.IsMasterR" + + "unningRequest\032!.hbase.pb.IsMasterRunning" + + "Response\022D\n\tAddColumn\022\032.hbase.pb.AddColu" + + "mnRequest\032\033.hbase.pb.AddColumnResponse\022M" + + "\n\014DeleteColumn\022\035.hbase.pb.DeleteColumnRe" + + "quest\032\036.hbase.pb.DeleteColumnResponse\022M\n" + + "\014ModifyColumn\022\035.hbase.pb.ModifyColumnReq" + + "uest\032\036.hbase.pb.ModifyColumnResponse\022G\n\n", + "MoveRegion\022\033.hbase.pb.MoveRegionRequest\032" + + "\034.hbase.pb.MoveRegionResponse\022k\n\026Dispatc" + + "hMergingRegions\022\'.hbase.pb.DispatchMergi" + + "ngRegionsRequest\032(.hbase.pb.DispatchMerg" + + "ingRegionsResponse\022\\\n\021MergeTableRegions\022" + + "\".hbase.pb.MergeTableRegionsRequest\032#.hb" + + "ase.pb.MergeTableRegionsResponse\022M\n\014Assi" + + "gnRegion\022\035.hbase.pb.AssignRegionRequest\032" + + "\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" + + "ignRegion\022\037.hbase.pb.UnassignRegionReque", + "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r" + + "OfflineRegion\022\036.hbase.pb.OfflineRegionRe" + + "quest\032\037.hbase.pb.OfflineRegionResponse\022J" + + "\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" + + "est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" + + "uncateTable\022\036.hbase.pb.TruncateTableRequ" + + "est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" + + "EnableTable\022\034.hbase.pb.EnableTableReques" + + "t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" + + "bleTable\022\035.hbase.pb.DisableTableRequest\032", + "\036.hbase.pb.DisableTableResponse\022J\n\013Modif" + + "yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" + + "base.pb.ModifyTableResponse\022J\n\013CreateTab" + + "le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" + + ".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" + + "ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" + + "wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" + + "MasterRequest\032\034.hbase.pb.StopMasterRespo" + + "nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" + + ".pb.IsInMaintenanceModeRequest\032%.hbase.p", + "b.IsInMaintenanceModeResponse\022>\n\007Balance" + + "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" + + "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" + + "ase.pb.SetBalancerRunningRequest\032$.hbase" + + ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" + + "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" + + "Request\032#.hbase.pb.IsBalancerEnabledResp" + + "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" + + "b.SetSplitOrMergeEnabledRequest\032(.hbase." 
+ + "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS", + "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM" + + "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" + + "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" + + ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" + + "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" + + "e.pb.SetNormalizerRunningRequest\032&.hbase" + + ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" + + "rmalizerEnabled\022$.hbase.pb.IsNormalizerE" + + "nabledRequest\032%.hbase.pb.IsNormalizerEna" + + "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p", + "b.RunCatalogScanRequest\032 .hbase.pb.RunCa" + + "talogScanResponse\022e\n\024EnableCatalogJanito" + + "r\022%.hbase.pb.EnableCatalogJanitorRequest" + + "\032&.hbase.pb.EnableCatalogJanitorResponse" + + "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" + + "sCatalogJanitorEnabledRequest\032).hbase.pb" + + ".IsCatalogJanitorEnabledResponse\022^\n\021Exec" + + "MasterService\022#.hbase.pb.CoprocessorServ" + + "iceRequest\032$.hbase.pb.CoprocessorService" + + "Response\022A\n\010Snapshot\022\031.hbase.pb.Snapshot", + "Request\032\032.hbase.pb.SnapshotResponse\022h\n\025G" + + "etCompletedSnapshots\022&.hbase.pb.GetCompl" + + "etedSnapshotsRequest\032\'.hbase.pb.GetCompl" + + "etedSnapshotsResponse\022S\n\016DeleteSnapshot\022" + + "\037.hbase.pb.DeleteSnapshotRequest\032 .hbase" + + ".pb.DeleteSnapshotResponse\022S\n\016IsSnapshot" + + "Done\022\037.hbase.pb.IsSnapshotDoneRequest\032 ." + + "hbase.pb.IsSnapshotDoneResponse\022V\n\017Resto" + + "reSnapshot\022 .hbase.pb.RestoreSnapshotReq" + + "uest\032!.hbase.pb.RestoreSnapshotResponse\022", + "P\n\rExecProcedure\022\036.hbase.pb.ExecProcedur" + + "eRequest\032\037.hbase.pb.ExecProcedureRespons" + + "e\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exe" + + "cProcedureRequest\032\037.hbase.pb.ExecProcedu" + + "reResponse\022V\n\017IsProcedureDone\022 .hbase.pb" + + ".IsProcedureDoneRequest\032!.hbase.pb.IsPro" + + "cedureDoneResponse\022V\n\017ModifyNamespace\022 ." + + "hbase.pb.ModifyNamespaceRequest\032!.hbase." + + "pb.ModifyNamespaceResponse\022V\n\017CreateName" + + "space\022 .hbase.pb.CreateNamespaceRequest\032", + "!.hbase.pb.CreateNamespaceResponse\022V\n\017De" + + "leteNamespace\022 .hbase.pb.DeleteNamespace" + + "Request\032!.hbase.pb.DeleteNamespaceRespon" + + "se\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb." + + "GetNamespaceDescriptorRequest\032(.hbase.pb" + + ".GetNamespaceDescriptorResponse\022q\n\030ListN" + + "amespaceDescriptors\022).hbase.pb.ListNames" + + "paceDescriptorsRequest\032*.hbase.pb.ListNa" + + "mespaceDescriptorsResponse\022\206\001\n\037ListTable" + + "DescriptorsByNamespace\0220.hbase.pb.ListTa", + "bleDescriptorsByNamespaceRequest\0321.hbase" + + ".pb.ListTableDescriptorsByNamespaceRespo" + + "nse\022t\n\031ListTableNamesByNamespace\022*.hbase" + + ".pb.ListTableNamesByNamespaceRequest\032+.h" + + "base.pb.ListTableNamesByNamespaceRespons" + + "e\022P\n\rGetTableState\022\036.hbase.pb.GetTableSt" + + "ateRequest\032\037.hbase.pb.GetTableStateRespo" + + "nse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReque" + + "st\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLas" + + "tMajorCompactionTimestamp\022).hbase.pb.Maj", + "orCompactionTimestampRequest\032*.hbase.pb." + + "MajorCompactionTimestampResponse\022\212\001\n(get" + + "LastMajorCompactionTimestampForRegion\0222." 
+ + "hbase.pb.MajorCompactionTimestampForRegi" + + "onRequest\032*.hbase.pb.MajorCompactionTime" + + "stampResponse\022_\n\022getProcedureResult\022#.hb" + + "ase.pb.GetProcedureResultRequest\032$.hbase" + + ".pb.GetProcedureResultResponse\022h\n\027getSec" + + "urityCapabilities\022%.hbase.pb.SecurityCap" + + "abilitiesRequest\032&.hbase.pb.SecurityCapa", + "bilitiesResponse\022S\n\016AbortProcedure\022\037.hba" + + "se.pb.AbortProcedureRequest\032 .hbase.pb.A" + + "bortProcedureResponse\022S\n\016ListProcedures\022" + + "\037.hbase.pb.ListProceduresRequest\032 .hbase" + + ".pb.ListProceduresResponseBI\n1org.apache" + + ".hadoop.hbase.shaded.protobuf.generatedB" + + "\014MasterProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { @@ -69808,608 +71402,620 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor, new java.lang.String[] { "ProcId", }); - internal_static_hbase_pb_AssignRegionRequest_descriptor = + internal_static_hbase_pb_MergeTableRegionsRequest_descriptor = getDescriptor().getMessageTypes().get(10); + internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_MergeTableRegionsRequest_descriptor, + new java.lang.String[] { "Region", "Forcible", "NonceGroup", "Nonce", }); + internal_static_hbase_pb_MergeTableRegionsResponse_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_MergeTableRegionsResponse_descriptor, + new java.lang.String[] { "ProcId", }); + internal_static_hbase_pb_AssignRegionRequest_descriptor = + getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_AssignRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AssignRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_AssignRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_hbase_pb_AssignRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AssignRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_UnassignRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_UnassignRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UnassignRegionRequest_descriptor, new java.lang.String[] { "Region", "Force", }); internal_static_hbase_pb_UnassignRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_UnassignRegionResponse_fieldAccessorTable = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UnassignRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_OfflineRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_OfflineRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_OfflineRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_OfflineRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_OfflineRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_OfflineRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_CreateTableRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_CreateTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateTableRequest_descriptor, new java.lang.String[] { "TableSchema", "SplitKeys", "NonceGroup", "Nonce", }); internal_static_hbase_pb_CreateTableResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_CreateTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_DeleteTableRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_DeleteTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteTableRequest_descriptor, new java.lang.String[] { "TableName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_DeleteTableResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_DeleteTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_TruncateTableRequest_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_TruncateTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_TruncateTableRequest_descriptor, new java.lang.String[] { "TableName", "PreserveSplits", "NonceGroup", "Nonce", }); internal_static_hbase_pb_TruncateTableResponse_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_TruncateTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_TruncateTableResponse_descriptor, new java.lang.String[] { "ProcId", }); 
internal_static_hbase_pb_EnableTableRequest_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_EnableTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableTableRequest_descriptor, new java.lang.String[] { "TableName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_EnableTableResponse_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_hbase_pb_EnableTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_DisableTableRequest_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(26); internal_static_hbase_pb_DisableTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DisableTableRequest_descriptor, new java.lang.String[] { "TableName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_DisableTableResponse_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(27); internal_static_hbase_pb_DisableTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DisableTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_ModifyTableRequest_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(28); internal_static_hbase_pb_ModifyTableRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyTableRequest_descriptor, new java.lang.String[] { "TableName", "TableSchema", "NonceGroup", "Nonce", }); internal_static_hbase_pb_ModifyTableResponse_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(29); internal_static_hbase_pb_ModifyTableResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyTableResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_CreateNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(30); internal_static_hbase_pb_CreateNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceDescriptor", "NonceGroup", "Nonce", }); internal_static_hbase_pb_CreateNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(31); internal_static_hbase_pb_CreateNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CreateNamespaceResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_DeleteNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(32); 
internal_static_hbase_pb_DeleteNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceName", "NonceGroup", "Nonce", }); internal_static_hbase_pb_DeleteNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(33); internal_static_hbase_pb_DeleteNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteNamespaceResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_ModifyNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(34); internal_static_hbase_pb_ModifyNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceDescriptor", "NonceGroup", "Nonce", }); internal_static_hbase_pb_ModifyNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(35); internal_static_hbase_pb_ModifyNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ModifyNamespaceResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetNamespaceDescriptorRequest_descriptor = - getDescriptor().getMessageTypes().get(34); + getDescriptor().getMessageTypes().get(36); internal_static_hbase_pb_GetNamespaceDescriptorRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetNamespaceDescriptorRequest_descriptor, new java.lang.String[] { "NamespaceName", }); internal_static_hbase_pb_GetNamespaceDescriptorResponse_descriptor = - getDescriptor().getMessageTypes().get(35); + getDescriptor().getMessageTypes().get(37); internal_static_hbase_pb_GetNamespaceDescriptorResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetNamespaceDescriptorResponse_descriptor, new java.lang.String[] { "NamespaceDescriptor", }); internal_static_hbase_pb_ListNamespaceDescriptorsRequest_descriptor = - getDescriptor().getMessageTypes().get(36); + getDescriptor().getMessageTypes().get(38); internal_static_hbase_pb_ListNamespaceDescriptorsRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListNamespaceDescriptorsRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListNamespaceDescriptorsResponse_descriptor = - getDescriptor().getMessageTypes().get(37); + getDescriptor().getMessageTypes().get(39); internal_static_hbase_pb_ListNamespaceDescriptorsResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListNamespaceDescriptorsResponse_descriptor, new java.lang.String[] { "NamespaceDescriptor", }); internal_static_hbase_pb_ListTableDescriptorsByNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(38); + getDescriptor().getMessageTypes().get(40); 
internal_static_hbase_pb_ListTableDescriptorsByNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableDescriptorsByNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceName", }); internal_static_hbase_pb_ListTableDescriptorsByNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(39); + getDescriptor().getMessageTypes().get(41); internal_static_hbase_pb_ListTableDescriptorsByNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableDescriptorsByNamespaceResponse_descriptor, new java.lang.String[] { "TableSchema", }); internal_static_hbase_pb_ListTableNamesByNamespaceRequest_descriptor = - getDescriptor().getMessageTypes().get(40); + getDescriptor().getMessageTypes().get(42); internal_static_hbase_pb_ListTableNamesByNamespaceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableNamesByNamespaceRequest_descriptor, new java.lang.String[] { "NamespaceName", }); internal_static_hbase_pb_ListTableNamesByNamespaceResponse_descriptor = - getDescriptor().getMessageTypes().get(41); + getDescriptor().getMessageTypes().get(43); internal_static_hbase_pb_ListTableNamesByNamespaceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListTableNamesByNamespaceResponse_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_ShutdownRequest_descriptor = - getDescriptor().getMessageTypes().get(42); + getDescriptor().getMessageTypes().get(44); internal_static_hbase_pb_ShutdownRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ShutdownRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ShutdownResponse_descriptor = - getDescriptor().getMessageTypes().get(43); + getDescriptor().getMessageTypes().get(45); internal_static_hbase_pb_ShutdownResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ShutdownResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_StopMasterRequest_descriptor = - getDescriptor().getMessageTypes().get(44); + getDescriptor().getMessageTypes().get(46); internal_static_hbase_pb_StopMasterRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopMasterRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_StopMasterResponse_descriptor = - getDescriptor().getMessageTypes().get(45); + getDescriptor().getMessageTypes().get(47); internal_static_hbase_pb_StopMasterResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopMasterResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor = - getDescriptor().getMessageTypes().get(46); + getDescriptor().getMessageTypes().get(48); internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor = - getDescriptor().getMessageTypes().get(47); + getDescriptor().getMessageTypes().get(49); internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor, new java.lang.String[] { "InMaintenanceMode", }); internal_static_hbase_pb_BalanceRequest_descriptor = - getDescriptor().getMessageTypes().get(48); + getDescriptor().getMessageTypes().get(50); internal_static_hbase_pb_BalanceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_BalanceRequest_descriptor, new java.lang.String[] { "Force", }); internal_static_hbase_pb_BalanceResponse_descriptor = - getDescriptor().getMessageTypes().get(49); + getDescriptor().getMessageTypes().get(51); internal_static_hbase_pb_BalanceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_BalanceResponse_descriptor, new java.lang.String[] { "BalancerRan", }); internal_static_hbase_pb_SetBalancerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(50); + getDescriptor().getMessageTypes().get(52); internal_static_hbase_pb_SetBalancerRunningRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetBalancerRunningRequest_descriptor, new java.lang.String[] { "On", "Synchronous", }); internal_static_hbase_pb_SetBalancerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(51); + getDescriptor().getMessageTypes().get(53); internal_static_hbase_pb_SetBalancerRunningResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetBalancerRunningResponse_descriptor, new java.lang.String[] { "PrevBalanceValue", }); internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(52); + getDescriptor().getMessageTypes().get(54); internal_static_hbase_pb_IsBalancerEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(53); + getDescriptor().getMessageTypes().get(55); internal_static_hbase_pb_IsBalancerEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(54); + getDescriptor().getMessageTypes().get(56); internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor, new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", }); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor = - 
getDescriptor().getMessageTypes().get(55); + getDescriptor().getMessageTypes().get(57); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(56); + getDescriptor().getMessageTypes().get(58); internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor, new java.lang.String[] { "SwitchType", }); internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(57); + getDescriptor().getMessageTypes().get(59); internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_NormalizeRequest_descriptor = - getDescriptor().getMessageTypes().get(58); + getDescriptor().getMessageTypes().get(60); internal_static_hbase_pb_NormalizeRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_NormalizeRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_NormalizeResponse_descriptor = - getDescriptor().getMessageTypes().get(59); + getDescriptor().getMessageTypes().get(61); internal_static_hbase_pb_NormalizeResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_NormalizeResponse_descriptor, new java.lang.String[] { "NormalizerRan", }); internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(60); + getDescriptor().getMessageTypes().get(62); internal_static_hbase_pb_SetNormalizerRunningRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor, new java.lang.String[] { "On", }); internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(61); + getDescriptor().getMessageTypes().get(63); internal_static_hbase_pb_SetNormalizerRunningResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor, new java.lang.String[] { "PrevNormalizerValue", }); internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(62); + getDescriptor().getMessageTypes().get(64); internal_static_hbase_pb_IsNormalizerEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(63); + getDescriptor().getMessageTypes().get(65); internal_static_hbase_pb_IsNormalizerEnabledResponse_fieldAccessorTable = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_RunCatalogScanRequest_descriptor = - getDescriptor().getMessageTypes().get(64); + getDescriptor().getMessageTypes().get(66); internal_static_hbase_pb_RunCatalogScanRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RunCatalogScanResponse_descriptor = - getDescriptor().getMessageTypes().get(65); + getDescriptor().getMessageTypes().get(67); internal_static_hbase_pb_RunCatalogScanResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanResponse_descriptor, new java.lang.String[] { "ScanResult", }); internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor = - getDescriptor().getMessageTypes().get(66); + getDescriptor().getMessageTypes().get(68); internal_static_hbase_pb_EnableCatalogJanitorRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor, new java.lang.String[] { "Enable", }); internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor = - getDescriptor().getMessageTypes().get(67); + getDescriptor().getMessageTypes().get(69); internal_static_hbase_pb_EnableCatalogJanitorResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(68); + getDescriptor().getMessageTypes().get(70); internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(69); + getDescriptor().getMessageTypes().get(71); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor, new java.lang.String[] { "Value", }); internal_static_hbase_pb_SnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(70); + getDescriptor().getMessageTypes().get(72); internal_static_hbase_pb_SnapshotRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_SnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(71); + getDescriptor().getMessageTypes().get(73); internal_static_hbase_pb_SnapshotResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SnapshotResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", }); 
internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor = - getDescriptor().getMessageTypes().get(72); + getDescriptor().getMessageTypes().get(74); internal_static_hbase_pb_GetCompletedSnapshotsRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor = - getDescriptor().getMessageTypes().get(73); + getDescriptor().getMessageTypes().get(75); internal_static_hbase_pb_GetCompletedSnapshotsResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor, new java.lang.String[] { "Snapshots", }); internal_static_hbase_pb_DeleteSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(74); + getDescriptor().getMessageTypes().get(76); internal_static_hbase_pb_DeleteSnapshotRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_DeleteSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(77); internal_static_hbase_pb_DeleteSnapshotResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RestoreSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(78); internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", }); internal_static_hbase_pb_RestoreSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(79); internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(80); internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(81); internal_static_hbase_pb_IsSnapshotDoneResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(82); 
internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(83); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", }); internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(82); + getDescriptor().getMessageTypes().get(84); internal_static_hbase_pb_GetSchemaAlterStatusRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(83); + getDescriptor().getMessageTypes().get(85); internal_static_hbase_pb_GetSchemaAlterStatusResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor, new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", }); internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor = - getDescriptor().getMessageTypes().get(84); + getDescriptor().getMessageTypes().get(86); internal_static_hbase_pb_GetTableDescriptorsRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor, new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor = - getDescriptor().getMessageTypes().get(85); + getDescriptor().getMessageTypes().get(87); internal_static_hbase_pb_GetTableDescriptorsResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor, new java.lang.String[] { "TableSchema", }); internal_static_hbase_pb_GetTableNamesRequest_descriptor = - getDescriptor().getMessageTypes().get(86); + getDescriptor().getMessageTypes().get(88); internal_static_hbase_pb_GetTableNamesRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesRequest_descriptor, new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableNamesResponse_descriptor = - getDescriptor().getMessageTypes().get(87); + getDescriptor().getMessageTypes().get(89); internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); internal_static_hbase_pb_GetTableStateRequest_descriptor = - getDescriptor().getMessageTypes().get(88); + getDescriptor().getMessageTypes().get(90); 
internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetTableStateRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetTableStateResponse_descriptor = - getDescriptor().getMessageTypes().get(89); + getDescriptor().getMessageTypes().get(91); internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetTableStateResponse_descriptor, new java.lang.String[] { "TableState", }); internal_static_hbase_pb_GetClusterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(90); + getDescriptor().getMessageTypes().get(92); internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(91); + getDescriptor().getMessageTypes().get(93); internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_hbase_pb_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(92); + getDescriptor().getMessageTypes().get(94); internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(93); + getDescriptor().getMessageTypes().get(95); internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_hbase_pb_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(94); + getDescriptor().getMessageTypes().get(96); internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(95); + getDescriptor().getMessageTypes().get(97); internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_hbase_pb_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(96); + getDescriptor().getMessageTypes().get(98); internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneRequest_descriptor, 
new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(97); + getDescriptor().getMessageTypes().get(99); internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_GetProcedureResultRequest_descriptor = - getDescriptor().getMessageTypes().get(98); + getDescriptor().getMessageTypes().get(100); internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultRequest_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetProcedureResultResponse_descriptor = - getDescriptor().getMessageTypes().get(99); + getDescriptor().getMessageTypes().get(101); internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultResponse_descriptor, new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); internal_static_hbase_pb_AbortProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(100); + getDescriptor().getMessageTypes().get(102); internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureRequest_descriptor, new java.lang.String[] { "ProcId", "MayInterruptIfRunning", }); internal_static_hbase_pb_AbortProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(101); + getDescriptor().getMessageTypes().get(103); internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureResponse_descriptor, new java.lang.String[] { "IsProcedureAborted", }); internal_static_hbase_pb_ListProceduresRequest_descriptor = - getDescriptor().getMessageTypes().get(102); + getDescriptor().getMessageTypes().get(104); internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListProceduresRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListProceduresResponse_descriptor = - getDescriptor().getMessageTypes().get(103); + getDescriptor().getMessageTypes().get(105); internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ListProceduresResponse_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_SetQuotaRequest_descriptor = - getDescriptor().getMessageTypes().get(104); + getDescriptor().getMessageTypes().get(106); internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); 
internal_static_hbase_pb_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(105); + getDescriptor().getMessageTypes().get(107); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(106); + getDescriptor().getMessageTypes().get(108); internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(107); + getDescriptor().getMessageTypes().get(109); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(108); + getDescriptor().getMessageTypes().get(110); internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor, new java.lang.String[] { "CompactionTimestamp", }); internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor = - getDescriptor().getMessageTypes().get(109); + getDescriptor().getMessageTypes().get(111); internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor = - getDescriptor().getMessageTypes().get(110); + getDescriptor().getMessageTypes().get(112); internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, diff --git hbase-protocol-shaded/src/main/protobuf/Admin.proto hbase-protocol-shaded/src/main/protobuf/Admin.proto index 47d39be..36221c24 100644 --- hbase-protocol-shaded/src/main/protobuf/Admin.proto +++ hbase-protocol-shaded/src/main/protobuf/Admin.proto @@ -119,14 +119,14 @@ message CloseRegionResponse { } /** - * Closes the specified region and create - * child region. 
+ * Closes the specified region(s) for
+ * split or merge
 */
-message CloseRegionForSplitRequest {
-  required RegionSpecifier region = 1;
+message CloseRegionForSplitOrMergeRequest {
+  repeated RegionSpecifier region = 1;
 }

-message CloseRegionForSplitResponse {
+message CloseRegionForSplitOrMergeResponse {
   required bool closed = 1;
 }

@@ -295,8 +295,8 @@ service AdminService {
   rpc CloseRegion(CloseRegionRequest)
     returns(CloseRegionResponse);

-  rpc CloseRegionForSplit(CloseRegionForSplitRequest)
-    returns(CloseRegionForSplitResponse);
+  rpc CloseRegionForSplitOrMerge(CloseRegionForSplitOrMergeRequest)
+    returns(CloseRegionForSplitOrMergeResponse);

   rpc FlushRegion(FlushRegionRequest)
     returns(FlushRegionResponse);

diff --git hbase-protocol-shaded/src/main/protobuf/Master.proto hbase-protocol-shaded/src/main/protobuf/Master.proto
index 9e6d1ed..b283ed9 100644
--- hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -93,6 +93,20 @@ message DispatchMergingRegionsResponse {
   optional uint64 proc_id = 1;
 }

+/**
+ * Merges the specified regions of a table.
+ */
+message MergeTableRegionsRequest {
+  repeated RegionSpecifier region = 1;
+  optional bool forcible = 3 [default = false];
+  optional uint64 nonce_group = 4 [default = 0];
+  optional uint64 nonce = 5 [default = 0];
+}
+
+message MergeTableRegionsResponse {
+  optional uint64 proc_id = 1;
+}
+
 message AssignRegionRequest {
   required RegionSpecifier region = 1;
 }

@@ -593,6 +607,10 @@ service MasterService {
   rpc DispatchMergingRegions(DispatchMergingRegionsRequest)
     returns(DispatchMergingRegionsResponse);

+  /** Master merges the regions. */
+  rpc MergeTableRegions(MergeTableRegionsRequest)
+    returns(MergeTableRegionsResponse);
+
   /** Assign a region to a server chosen at random.
*/ rpc AssignRegion(AssignRegionRequest) returns(AssignRegionResponse); diff --git hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto index 8926605..23d914e 100644 --- hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto +++ hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto @@ -277,11 +277,32 @@ message DispatchMergingRegionsStateData { optional bool forcible = 4; } +enum MergeTableRegionsState { + MERGE_TABLE_REGIONS_PREPARE = 1; + MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 2; + MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 3; + MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE = 4; + MERGE_TABLE_REGIONS_CLOSE_REGIONS = 5; + MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 6; + MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 7; + MERGE_TABLE_REGIONS_UPDATE_META = 8; + MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 9; + MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 10; + MERGE_TABLE_REGIONS_POST_OPERATION = 11; +} + +message MergeTableRegionsStateData { + required UserInformation user_info = 1; + repeated RegionInfo region_info = 2; + required RegionInfo merged_region_info = 3; + optional bool forcible = 4 [default = false]; +} + enum SplitTableRegionState { SPLIT_TABLE_REGION_PREPARE = 1; SPLIT_TABLE_REGION_PRE_OPERATION = 2; SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3; - SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4; + SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 4; SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5; SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6; SPLIT_TABLE_REGION_UPDATE_META = 7; diff --git hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 70167bb..2065939 100644 --- hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -1030,6 +1030,18 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService } @Override + public void preMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override + public void postMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override public void preMoveServers(ObserverContext ctx, Set servers, String targetGroup) throws IOException { } @@ -1133,7 +1145,40 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService } @Override - public void preRollBackSplitRegionAction( + public void postRollBackSplitRegionAction( final ObserverContext ctx) throws IOException { } + + @Override + public void preMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override + public void postCompletedMergeRegionsAction( + final ObserverContext c, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void preMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final List metaEntries) throws IOException { + } + + @Override + public void postMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void postRollBackMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } } diff 
--git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java index 21381e8..93b2085 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; -import org.apache.hadoop.hbase.regionserver.Region; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -480,6 +479,18 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver } @Override + public void preMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override + public void postMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override public void preAbortProcedure( ObserverContext ctx, final ProcedureExecutor procEnv, @@ -831,7 +842,40 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver } @Override - public void preRollBackSplitRegionAction(final ObserverContext ctx) + public void postRollBackSplitRegionAction(final ObserverContext ctx) throws IOException { } + + @Override + public void preMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override + public void postCompletedMergeRegionsAction( + final ObserverContext c, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void preMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final List metaEntries) throws IOException { + } + + @Override + public void postMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void postRollBackMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index 4d24a84..23afe4b 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; -import org.apache.hadoop.hbase.regionserver.Region; @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.CONFIG}) @InterfaceStability.Evolving @@ -755,6 +754,18 @@ public class BaseMasterObserver implements MasterObserver { } @Override + public void preMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws 
IOException { + } + + @Override + public void postMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override public void preAbortProcedure( ObserverContext ctx, final ProcedureExecutor procEnv, @@ -852,11 +863,44 @@ public class BaseMasterObserver implements MasterObserver { } @Override - public void preRollBackSplitRegionAction(final ObserverContext ctx) + public void postRollBackSplitRegionAction(final ObserverContext ctx) throws IOException { } @Override + public void preMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override + public void postCompletedMergeRegionsAction( + final ObserverContext c, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void preMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final List metaEntries) throws IOException { + } + + @Override + public void postMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void postRollBackMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } + + @Override public void preBalance(ObserverContext ctx) throws IOException { } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index e90f753..9abcd52 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaMutationAnnotation; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.ServerName; @@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; @@ -1196,14 +1198,74 @@ public interface MasterObserver extends Coprocessor { throws IOException; /** - * This will be called before the roll back of the split region is completed + * This will be called after the roll back of the split region is completed * @param ctx the environment to interact with the framework and master * @throws IOException */ - void preRollBackSplitRegionAction(final ObserverContext ctx) + void postRollBackSplitRegionAction(final ObserverContext ctx) throws IOException; /** + * Called before the regions merge. + * Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} to skip the merge. 
+   * @param ctx the environment to interact with the framework and master
+   * @param regionsToMerge the regions to merge
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void preMergeRegionsAction(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final HRegionInfo[] regionsToMerge) throws IOException;
+
+  /**
+   * Called after the regions merge has completed.
+   * @param c the environment to interact with the framework and master
+   * @param regionsToMerge the regions that were merged
+   * @param mergedRegion the resulting merged region
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void postCompletedMergeRegionsAction(
+      final ObserverContext<MasterCoprocessorEnvironment> c,
+      final HRegionInfo[] regionsToMerge,
+      final HRegionInfo mergedRegion) throws IOException;
+
+  /**
+   * This will be called before the PONR step as part of the regions merge transaction. Calling
+   * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} rolls back the merge.
+   * @param ctx the environment to interact with the framework and master
+   * @param regionsToMerge the regions to merge
+   * @param metaEntries mutations to execute on hbase:meta atomically with regions merge updates.
+   *        Any puts or deletes to execute on hbase:meta can be added to the mutations.
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void preMergeRegionsCommitAction(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final HRegionInfo[] regionsToMerge,
+      @MetaMutationAnnotation List<Mutation> metaEntries) throws IOException;
+
+  /**
+   * This will be called after the PONR step as part of the regions merge transaction.
+   * @param ctx the environment to interact with the framework and master
+   * @param regionsToMerge the regions that were merged
+   * @param mergedRegion the resulting merged region
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void postMergeRegionsCommitAction(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final HRegionInfo[] regionsToMerge,
+      final HRegionInfo mergedRegion) throws IOException;
+
+  /**
+   * This will be called after the rollback of the regions merge.
+   * @param ctx the environment to interact with the framework and master
+   * @param regionsToMerge the regions whose merge was rolled back
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void postRollBackMergeRegionsAction(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final HRegionInfo[] regionsToMerge) throws IOException;
+
   /**
    * Called prior to modifying the flag used to enable/disable region balancing.
    * @param ctx the coprocessor instance's environment
    * @param newValue the new flag value submitted in the call
@@ -1651,6 +1713,27 @@ public interface MasterObserver extends Coprocessor {
       final HRegionInfo regionA, final HRegionInfo regionB) throws IOException;

   /**
+   * Called before the merge regions request.
+   * It can't bypass the default action, i.e., ctx.bypass() has no effect.
+   * @param ctx coprocessor environment
+   * @param regionsToMerge regions to be merged
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void preMergeRegions(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final HRegionInfo[] regionsToMerge) throws IOException;
+
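+  // Note: the mapping below is an inference from the state names in
+  // MasterProcedure.proto, not something this hunk itself spells out. The
+  // *Action hooks presumably fire from MergeTableRegionsProcedure as it
+  // steps through MergeTableRegionsState:
+  //   MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION         -> preMergeRegionsAction
+  //   MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION  -> preMergeRegionsCommitAction
+  //   MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION -> postMergeRegionsCommitAction
+  //   MERGE_TABLE_REGIONS_POST_OPERATION              -> postCompletedMergeRegionsAction
+  //   rollback of any step                            -> postRollBackMergeRegionsAction
+  // The request-level preMergeRegions/postMergeRegions pair instead runs in
+  // HMaster#mergeRegions, around the procedure submission (see the HMaster
+  // hunk below).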
+  /**
+   * Called after the merge regions request.
+   * @param c coprocessor environment
+   * @param regionsToMerge regions to be merged
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  void postMergeRegions(
+      final ObserverContext<MasterCoprocessorEnvironment> c,
+      final HRegionInfo[] regionsToMerge) throws IOException;
+
   /**
    * Called before servers are moved to target region server group
    * @param ctx the environment to interact with the framework and master
    * @param servers set of servers to move

diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 3540b19..a8061a1 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -2630,6 +2630,39 @@ public class AssignmentManager {
     return null;
   }

+  public void assignMergedRegion(
+      final HRegionInfo mergedRegion,
+      final HRegionInfo daughterAHRI,
+      final HRegionInfo daughterBHRI) throws InterruptedException, IOException {
+    // Offline the daughter regions
+    regionOffline(daughterAHRI, State.MERGED);
+    regionOffline(daughterBHRI, State.MERGED);
+
+    // Set the merged region offline before assignment
+    regionStates.prepareAssignMergedRegion(mergedRegion);
+
+    // Assign the merged region
+    invokeAssign(mergedRegion);
+
+    // Merge the region replicas in the background
+    Callable<Object> mergeReplicasCallable = new Callable<Object>() {
+      @Override
+      public Object call() {
+        doMergingOfReplicas(mergedRegion, daughterAHRI, daughterBHRI);
+        return null;
+      }
+    };
+    threadPoolExecutorService.submit(mergeReplicasCallable);
+
+    // Wait for the assignment to complete
+    ArrayList<HRegionInfo> regionAssignSet = new ArrayList<HRegionInfo>(1);
+    regionAssignSet.add(mergedRegion);
+    while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(), Long.MAX_VALUE)) {
+      LOG.debug("The merged region " + mergedRegion + " is still in transition. ");
+    }
+
+    regionStateListener.onRegionMerged(mergedRegion);
+  }
+
   private String onRegionMerged(final RegionState current, final HRegionInfo hri,
       final ServerName serverName, final RegionStateTransition transition) {
     // The region must be in merging_new state, and the daughters must be

diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5f2e2a6..710c48a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -109,6 +109,7 @@ import org.apache.hadoop.hbase.master.procedure.DispatchMergingRegionsProcedure;
 import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MergeTableRegionsProcedure;
 import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
 import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
@@ -1420,6 +1421,50 @@ public class HMaster extends HRegionServer implements MasterServices {
   }

   @Override
+  public long mergeRegions(
+      final HRegionInfo[] regionsToMerge,
+      final boolean forcible,
+      final long nonceGroup,
+      final long nonce) throws IOException {
+    checkInitialized();
+
+    // The procedure currently supports merging exactly two regions.
+    assert (regionsToMerge.length == 2);
+
+    TableName tableName = regionsToMerge[0].getTable();
+    if (tableName == null || regionsToMerge[1].getTable() == null) {
+      throw new UnknownRegionException("Can't merge regions that have no associated table");
+    }
+
+    if (!tableName.equals(regionsToMerge[1].getTable())) {
+      throw new IOException(
+        "Cannot merge regions from two different tables " + regionsToMerge[0].getTable()
+          + " and " + regionsToMerge[1].getTable());
+    }
+
+    if (regionsToMerge[0].compareTo(regionsToMerge[1]) == 0) {
+      throw new MergeRegionException(
+        "Cannot merge a region with itself " + regionsToMerge[0] + ", " + regionsToMerge[1]);
+    }
+
+    if (cpHost != null) {
+      cpHost.preMergeRegions(regionsToMerge);
+    }
+
+    LOG.info(getClientIdAuditPrefix() + " Merge regions "
+        + regionsToMerge[0].getEncodedName() + " and " + regionsToMerge[1].getEncodedName());
+
+    long procId = this.procedureExecutor.submitProcedure(
+      new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), regionsToMerge, forcible),
+      nonceGroup,
+      nonce);
+
+    if (cpHost != null) {
+      cpHost.postMergeRegions(regionsToMerge);
+    }
+    return procId;
+  }
+
+  @Override
   public long splitRegion(
       final HRegionInfo regionInfo,
       final byte[] splitRow,

diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index d0ac765..a18068d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
@@ -792,6
+793,28 @@ public class MasterCoprocessorHost }); } + public void preMergeRegions(final HRegionInfo[] regionsToMerge) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.preMergeRegions(ctx, regionsToMerge); + } + }); + } + + public void postMergeRegions(final HRegionInfo[] regionsToMerge) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.postMergeRegions(ctx, regionsToMerge); + } + }); + } + public boolean preBalance() throws IOException { return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { @Override @@ -928,16 +951,110 @@ public class MasterCoprocessorHost } /** - * Invoked just before the rollback of a failed split is started + * Invoked just after the rollback of a failed split * @param user the user * @throws IOException */ - public void preRollBackSplitAction(final User user) throws IOException { + public void postRollBackSplitRegionAction(final User user) throws IOException { execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { @Override public void call(MasterObserver oserver, ObserverContext ctx) throws IOException { - oserver.preRollBackSplitRegionAction(ctx); + oserver.postRollBackSplitRegionAction(ctx); + } + }); + } + + /** + * Invoked just before a merge + * @param regionsToMerge the regions to merge + * @param user the user + * @throws IOException + */ + public boolean preMergeRegionsAction( + final HRegionInfo[] regionsToMerge, final User user) throws IOException { + return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.preMergeRegionsAction(ctx, regionsToMerge); + } + }); + } + + /** + * Invoked after completing merge regions operation + * @param regionsToMerge the regions to merge + * @param mergedRegion the new merged region + * @param user the user + * @throws IOException + */ + public void postCompletedMergeRegionsAction( + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion, + final User user) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postCompletedMergeRegionsAction(ctx, regionsToMerge, mergedRegion); + } + }); + } + + /** + * Invoked before merge regions operation writes the new region to hbase:meta + * @param regionsToMerge the regions to merge + * @param metaEntries the meta entry + * @param user the user + * @throws IOException + */ + public boolean preMergeRegionsCommit( + final HRegionInfo[] regionsToMerge, + final @MetaMutationAnnotation List metaEntries, + final User user) throws IOException { + return execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.preMergeRegionsCommitAction(ctx, regionsToMerge, metaEntries); + } + }); + } + + /** + * Invoked after merge regions operation writes the new region to hbase:meta + * @param regionsToMerge the regions to merge + * @param mergedRegion the new merged region + * @param user the user + * @throws IOException + */ + public void postMergeRegionsCommit( + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion, + final User user) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postMergeRegionsCommitAction(ctx, regionsToMerge, mergedRegion); + } + }); + } + + /** + * Invoked after rollback merge regions operation + * @param regionsToMerge the regions to merge + * @param user the user + * @throws IOException + */ + public void postRollBackMergeRegionsAction( + final HRegionInfo[] regionsToMerge, final User user) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postRollBackMergeRegionsAction(ctx, regionsToMerge); } }); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 97eb209..709b3f2 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -553,6 +553,46 @@ public class MasterRpcServices extends RSRpcServices } @Override + public MergeTableRegionsResponse mergeTableRegions( + RpcController c, MergeTableRegionsRequest request) throws ServiceException { + try { + master.checkInitialized(); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + + RegionStates regionStates = master.getAssignmentManager().getRegionStates(); + + assert(request.getRegionCount() == 2); + HRegionInfo[] regionsToMerge = new HRegionInfo[request.getRegionCount()]; + for (int i = 0; i < request.getRegionCount(); i++) { + final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray(); + if (request.getRegion(i).getType() != RegionSpecifierType.ENCODED_REGION_NAME) { + LOG.warn("MergeRegions specifier type: expected: " + + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region " + i + " =" + + request.getRegion(i).getType()); + } + RegionState regionState = regionStates.getRegionState(Bytes.toString(encodedNameOfRegion)); + if (regionState == null) { + throw new ServiceException( + new UnknownRegionException(Bytes.toStringBinary(encodedNameOfRegion))); + } + regionsToMerge[i] = regionState.getRegion(); + } + + try { + long procId = master.mergeRegions( + regionsToMerge, + request.getForcible(), + request.getNonceGroup(), + request.getNonce()); + return MergeTableRegionsResponse.newBuilder().setProcId(procId).build(); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + } + + @Override public SplitTableRegionResponse splitRegion( final RpcController controller, final SplitTableRegionRequest request) throws ServiceException { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index fa1c33d..a4c27f3 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -265,6 +265,21 @@ public interface MasterServices extends Server { throws IOException; /** + * Merge regions in a table. + * @param regionsToMerge daughter regions to merge + * @param forcible whether to force the merge even if the two regions are not adjacent + * @param nonceGroup used to detect duplicate requests + * @param nonce used to detect duplicate requests + * @return procedure Id + * @throws IOException + */ + long mergeRegions( + final HRegionInfo[] regionsToMerge, + final boolean forcible, + final long nonceGroup, + final long nonce) throws IOException; + + /** * Split a region. * @param regionInfo region to split * @param splitRow split point @@ -273,7 +288,7 @@ * @return procedure Id * @throws IOException */ - public long splitRegion( + long splitRegion( final HRegionInfo regionInfo, final byte [] splitRow, final long nonceGroup, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index b199374..7c2df61 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -896,6 +896,14 @@ public class RegionStates { } } + public void prepareAssignMergedRegion(HRegionInfo mergedRegion) { + synchronized (this) { + if (isRegionInState(mergedRegion, State.MERGING_NEW)) { + updateRegionState(mergedRegion, State.OFFLINE, null); + } + } + } + void splitRegion(HRegionInfo p, HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index a567e1d..b76cd7e 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -823,23 +823,22 @@ public class ServerManager { * A region server could reject the close request because it either does not * have the specified region or the region is being split. * @param server server to close a region - * @param regionToClose the info of the region to close + * @param regionToClose the info of the region(s) to close * @throws IOException */ - public boolean sendRegionCloseForSplit( + public boolean sendRegionCloseForSplitOrMerge( final ServerName server, - final HRegionInfo regionToClose) throws IOException { + final HRegionInfo... regionToClose) throws IOException { if (server == null) { throw new NullPointerException("Passed server is null"); } AdminService.BlockingInterface admin = getRsAdmin(server); if (admin == null) { - throw new IOException("Attempting to send CLOSE For Split RPC to server " + - server.toString() + " for region " + regionToClose.getRegionNameAsString() + - " failed because no RPC connection found to this server"); + throw new IOException("Attempting to send CLOSE For Split or Merge RPC to server " + + server.toString() + " failed because no RPC connection found to this server."); } HBaseRpcController controller = newRpcController(); - return ProtobufUtil.closeRegionForSplit(controller, admin, server, regionToClose); + return ProtobufUtil.closeRegionForSplitOrMerge(controller, admin, server, regionToClose); } /**
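Before the new-file procedure below, one note on the ServerManager change just shown: the close RPC now takes varargs, so a single call can ask one region server to close every region participating in the merge. A minimal caller sketch, with the helper name and error text invented for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;

final class CloseForMergeHelper {
  // Ask the hosting region server to close both parent regions in one RPC.
  static void closeParents(ServerManager serverManager, ServerName server,
      HRegionInfo regionA, HRegionInfo regionB) throws IOException {
    if (!serverManager.sendRegionCloseForSplitOrMerge(server, regionA, regionB)) {
      throw new IOException("Region server " + server + " rejected the close request");
    }
  }
}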
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java new file mode 100644 index 0000000..c313700 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java @@ -0,0 +1,907 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InterruptedIOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaMutationAnnotation; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.exceptions.MergeRegionException; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.CatalogJanitor; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; + +/** + * The procedure to merge regions in a table. 
+ */ +@InterfaceAudience.Private +public class MergeTableRegionsProcedure + extends AbstractStateMachineTableProcedure { + private static final Log LOG = LogFactory.getLog(MergeTableRegionsProcedure.class); + + private Boolean traceEnabled; + private AssignmentManager assignmentManager; + private int timeout; + private ServerName regionLocation; + private String regionsToMergeListFullName; + private String regionsToMergeListEncodedName; + + private HRegionInfo[] regionsToMerge; + private HRegionInfo mergedRegionInfo; + private boolean forcible; + + public MergeTableRegionsProcedure() { + this.traceEnabled = isTraceEnabled(); + this.assignmentManager = null; + this.timeout = -1; + this.regionLocation = null; + this.regionsToMergeListFullName = null; + this.regionsToMergeListEncodedName = null; + } + + public MergeTableRegionsProcedure( + final MasterProcedureEnv env, + final HRegionInfo[] regionsToMerge, + final boolean forcible) throws IOException { + super(env); + this.traceEnabled = isTraceEnabled(); + this.assignmentManager = getAssignmentManager(env); + // For now, we only merge 2 regions. It could be extended to more than 2 regions in + // the future. + assert(regionsToMerge.length == 2); + assert(regionsToMerge[0].getTable() == regionsToMerge[1].getTable()); + this.regionsToMerge = regionsToMerge; + this.forcible = forcible; + + this.timeout = -1; + this.regionsToMergeListFullName = getRegionsToMergeListFullNameString(); + this.regionsToMergeListEncodedName = getRegionsToMergeListEncodedNameString(); + + // Check daughter regions and make sure that we have valid daughter regions before + // doing the real work. + checkDaughterRegions(); + // WARN: make sure there is no parent region of the two merging regions in + // hbase:meta. If one exists, fixing up daughters would bring the daughter regions (which + // we have merged) online again when we restart the master, so we should clear + // the parent region to prevent the above case. + // Since HBASE-7721, we don't need to fix up daughters any more, 
so here we do + // nothing + setupMergedRegionInfo(); + } + + @Override + protected Flow executeFromState( + final MasterProcedureEnv env, + final MergeTableRegionsState state) throws InterruptedException { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case MERGE_TABLE_REGIONS_PREPARE: + prepareMergeRegion(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS); + break; + case MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS: + if (MoveRegionsToSameRS(env)) { + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION); + } else { + LOG.info("Cancel merging regions " + getRegionsToMergeListFullNameString() + + ", because they can't be moved to the same RS"); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_POST_OPERATION); + } + break; + case MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION: + preMergeRegions(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE); + break; + case MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE: + setRegionStateToMerging(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_CLOSE_REGIONS); + break; + case MERGE_TABLE_REGIONS_CLOSE_REGIONS: + closeRegionsForMerge(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_CREATE_MERGED_REGION); + break; + case MERGE_TABLE_REGIONS_CREATE_MERGED_REGION: + createMergedRegion(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION); + break; + case MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION: + preMergeRegionsCommit(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_UPDATE_META); + break; + case MERGE_TABLE_REGIONS_UPDATE_META: + updateMetaForMergedRegions(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION); + break; + case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION: + postMergeRegionsCommit(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_OPEN_MERGED_REGION); + break; + case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION: + openMergedRegions(env); + setNextState(MergeTableRegionsState.MERGE_TABLE_REGIONS_POST_OPERATION); + break; + case MERGE_TABLE_REGIONS_POST_OPERATION: + postCompletedMergeRegions(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + LOG.warn("Error trying to merge regions " + getRegionsToMergeListFullNameString() + + " in the table " + getTableName() + " (in state=" + state + ")", e); + + setFailure("master-merge-regions", e); + } + return Flow.HAS_MORE_STATE; + }
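For reference, the happy path encoded by the switch above is PREPARE -> MOVE_REGION_TO_SAME_RS -> PRE_MERGE_OPERATION -> SET_MERGING_TABLE_STATE -> CLOSE_REGIONS -> CREATE_MERGED_REGION -> PRE_MERGE_COMMIT_OPERATION -> UPDATE_META -> POST_MERGE_COMMIT_OPERATION -> OPEN_MERGED_REGION -> POST_OPERATION; everything from UPDATE_META onward is past the point of no return, as the rollback logic below makes explicit.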
+ + " Rollback is not supported and we should let the merge operation to complete"; + LOG.warn(msg); + // PONR + throw new UnsupportedOperationException(this + " unhandled state=" + state); + case MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION: + break; + case MERGE_TABLE_REGIONS_CREATE_MERGED_REGION: + cleanupMergedRegion(env); + break; + case MERGE_TABLE_REGIONS_CLOSE_REGIONS: + rollbackCloseRegionsForMerge(env); + break; + case MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE: + setRegionStateToRevertMerging(env); + break; + case MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION: + postRollBackMergeRegions(env); + break; + case MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS: + break; // nothing to rollback + case MERGE_TABLE_REGIONS_PREPARE: + break; // nothing to rollback + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (Exception e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. network down) + LOG.warn("Failed rollback attempt step " + state + " for merging the regions " + + getRegionsToMergeListFullNameString() + " in table " + getTableName(), e); + throw e; + } + } + + /* + * Check whether we are in the state that can be rollback + */ + @Override + protected boolean isRollbackSupported(final MergeTableRegionsState state) { + switch (state) { + case MERGE_TABLE_REGIONS_POST_OPERATION: + case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION: + case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION: + case MERGE_TABLE_REGIONS_UPDATE_META: + // It is not safe to rollback if we reach to these states. + return false; + default: + break; + } + return true; + } + + @Override + protected MergeTableRegionsState getState(final int stateId) { + return MergeTableRegionsState.valueOf(stateId); + } + + @Override + protected int getStateId(final MergeTableRegionsState state) { + return state.getNumber(); + } + + @Override + protected MergeTableRegionsState getInitialState() { + return MergeTableRegionsState.MERGE_TABLE_REGIONS_PREPARE; + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.MergeTableRegionsStateData.Builder mergeTableRegionsMsg = + MasterProcedureProtos.MergeTableRegionsStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) + .setMergedRegionInfo(HRegionInfo.convert(mergedRegionInfo)) + .setForcible(forcible); + for (HRegionInfo hri: regionsToMerge) { + mergeTableRegionsMsg.addRegionInfo(HRegionInfo.convert(hri)); + } + mergeTableRegionsMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.MergeTableRegionsStateData mergeTableRegionsMsg = + MasterProcedureProtos.MergeTableRegionsStateData.parseDelimitedFrom(stream); + setUser(MasterProcedureUtil.toUserInfo(mergeTableRegionsMsg.getUserInfo())); + + assert(mergeTableRegionsMsg.getRegionInfoCount() == 2); + regionsToMerge = new HRegionInfo[mergeTableRegionsMsg.getRegionInfoCount()]; + for (int i = 0; i < regionsToMerge.length; i++) { + regionsToMerge[i] = HRegionInfo.convert(mergeTableRegionsMsg.getRegionInfo(i)); + } + + mergedRegionInfo = HRegionInfo.convert(mergeTableRegionsMsg.getMergedRegionInfo()); + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + 
+ + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(getTableName()); + sb.append(" regions="); + sb.append(getRegionsToMergeListFullNameString()); + sb.append(" forcible="); + sb.append(forcible); + sb.append(")"); + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return !env.getProcedureQueue().waitRegions( + this, getTableName(), regionsToMerge[0], regionsToMerge[1]); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().wakeRegions(this, getTableName(), regionsToMerge[0], regionsToMerge[1]); + } + + @Override + public TableName getTableName() { + return regionsToMerge[0].getTable(); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.MERGE; + } + + /** + * Check the daughter regions + * @throws IOException + */ + private void checkDaughterRegions() throws IOException { + // Note: the following logic assumes that we only have 2 regions to merge. In the future, + // if we want to extend to more than 2 regions, the code needs to be modified a bit. + // + if (regionsToMerge[0].getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID || + regionsToMerge[1].getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { + throw new MergeRegionException("Can't merge non-default replicas"); + } + + if (!HRegionInfo.areAdjacent(regionsToMerge[0], regionsToMerge[1])) { + String msg = "Trying to merge non-adjacent regions " + + getRegionsToMergeListFullNameString() + " where forcible = " + forcible; + LOG.warn(msg); + if (!forcible) { + throw new DoNotRetryIOException(msg); + } + } + } + + /** + * Prepare the merge and do some checks + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareMergeRegion(final MasterProcedureEnv env) throws IOException { + // Note: the following logic assumes that we only have 2 regions to merge. In the future, + // if we want to extend to more than 2 regions, the code needs to be modified a bit. + // + CatalogJanitor catalogJanitor = env.getMasterServices().getCatalogJanitor(); + boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(regionsToMerge[0]); + if (regionAHasMergeQualifier + || !catalogJanitor.cleanMergeQualifier(regionsToMerge[1])) { + String msg = "Skip merging regions " + getRegionsToMergeListFullNameString() + + ", because region " + + (regionAHasMergeQualifier ? regionsToMerge[0].getEncodedName() : regionsToMerge[1] + .getEncodedName()) + " has merge qualifier"; + LOG.warn(msg); + throw new MergeRegionException(msg); + } + + RegionStates regionStates = getAssignmentManager(env).getRegionStates(); + RegionState regionStateA = regionStates.getRegionState(regionsToMerge[0].getEncodedName()); + RegionState regionStateB = regionStates.getRegionState(regionsToMerge[1].getEncodedName()); + if (regionStateA == null || regionStateB == null) { + throw new UnknownRegionException( + regionStateA == null ? + regionsToMerge[0].getEncodedName() : regionsToMerge[1].getEncodedName()); + } + + if (!regionStateA.isOpened() || !regionStateB.isOpened()) { + throw new MergeRegionException( + "Unable to merge regions that are not online " + regionStateA + ", " + regionStateB); + } + }
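The next helper, setupMergedRegionInfo(), computes the merged region's boundaries as the union of the two input ranges. A standalone simplification of that math follows (it compares raw start keys, whereas the procedure compares HRegionInfo objects; for two default regions of the same table the result is the same):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

final class MergedRange {
  final byte[] startKey;
  final byte[] endKey;

  // Smaller start key wins; an empty end key means "end of table" and always
  // wins, otherwise the larger end key wins.
  MergedRange(byte[] startA, byte[] endA, byte[] startB, byte[] endB) {
    startKey = Bytes.compareTo(startA, startB) <= 0 ? startA : startB;
    if (Bytes.equals(endA, HConstants.EMPTY_BYTE_ARRAY)
        || (!Bytes.equals(endB, HConstants.EMPTY_BYTE_ARRAY)
            && Bytes.compareTo(endA, endB) > 0)) {
      endKey = endA;
    } else {
      endKey = endB;
    }
  }
}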
+ + /** + * Create the merged region info from the two specified regions + */ + private void setupMergedRegionInfo() { + long rid = EnvironmentEdgeManager.currentTime(); + // Region id is the timestamp. The merged region's id can't be less than that of the + // merging regions, else it will insert at the wrong location in hbase:meta + if (rid < regionsToMerge[0].getRegionId() || rid < regionsToMerge[1].getRegionId()) { + LOG.warn("Clock skew; merging regions id are " + regionsToMerge[0].getRegionId() + + " and " + regionsToMerge[1].getRegionId() + ", but current time here is " + rid); + rid = Math.max(regionsToMerge[0].getRegionId(), regionsToMerge[1].getRegionId()) + 1; + } + + byte[] startKey = null; + byte[] endKey = null; + // Choose the smaller as start key + if (regionsToMerge[0].compareTo(regionsToMerge[1]) <= 0) { + startKey = regionsToMerge[0].getStartKey(); + } else { + startKey = regionsToMerge[1].getStartKey(); + } + // Choose the bigger as end key + if (Bytes.equals(regionsToMerge[0].getEndKey(), HConstants.EMPTY_BYTE_ARRAY) + || (!Bytes.equals(regionsToMerge[1].getEndKey(), HConstants.EMPTY_BYTE_ARRAY) + && Bytes.compareTo(regionsToMerge[0].getEndKey(), regionsToMerge[1].getEndKey()) > 0)) { + endKey = regionsToMerge[0].getEndKey(); + } else { + endKey = regionsToMerge[1].getEndKey(); + } + + // Merged region is sorted between the two merging regions in META + mergedRegionInfo = new HRegionInfo(getTableName(), startKey, endKey, false, rid); + } + + /** + * Move all regions to the same region server + * @param env MasterProcedureEnv + * @return whether the target regions are hosted by the same RS + * @throws IOException + */ + private boolean MoveRegionsToSameRS(final MasterProcedureEnv env) throws IOException { + // Make sure the regions are on the same region server before sending the merge + // regions request to the region server. + // + boolean onSameRS = isRegionsOnTheSameServer(env); + if (!onSameRS) { + // Note: the following logic assumes that we only have 2 regions to merge. In the future, + // if we want to extend to more than 2 regions, the code needs to be modified a bit. 
+ // + RegionStates regionStates = getAssignmentManager(env).getRegionStates(); + ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]); + + RegionLoad loadOfRegionA = getRegionLoad(env, regionLocation, regionsToMerge[0]); + RegionLoad loadOfRegionB = getRegionLoad(env, regionLocation2, regionsToMerge[1]); + if (loadOfRegionA != null && loadOfRegionB != null + && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) { + // switch regionsToMerge[0] and regionsToMerge[1] + HRegionInfo tmpRegion = this.regionsToMerge[0]; + this.regionsToMerge[0] = this.regionsToMerge[1]; + this.regionsToMerge[1] = tmpRegion; + ServerName tmpLocation = regionLocation; + regionLocation = regionLocation2; + regionLocation2 = tmpLocation; + } + + long startTime = EnvironmentEdgeManager.currentTime(); + + RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation); + LOG.info("Moving regions to same server for merge: " + regionPlan.toString()); + getAssignmentManager(env).balance(regionPlan); + do { + try { + Thread.sleep(20); + // Make sure check RIT first, then get region location, otherwise + // we would make a wrong result if region is online between getting + // region location and checking RIT + boolean isRIT = regionStates.isRegionInTransition(regionsToMerge[1]); + regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]); + onSameRS = regionLocation.equals(regionLocation2); + if (onSameRS || !isRIT) { + // Regions are on the same RS, or regionsToMerge[1] is not in + // RegionInTransition any more + break; + } + } catch (InterruptedException e) { + InterruptedIOException iioe = new InterruptedIOException(); + iioe.initCause(e); + throw iioe; + } + } while ((EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env)); + } + return onSameRS; + } + + /** + * Pre merge region action + * @param env MasterProcedureEnv + **/ + private void preMergeRegions(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + boolean ret = cpHost.preMergeRegionsAction(regionsToMerge, getUser()); + if (ret) { + throw new IOException( + "Coprocessor bypassing regions " + getRegionsToMergeListFullNameString() + " merge."); + } + } + } + + /** + * Action after rollback a merge table regions action. 
+ * @param env MasterProcedureEnv + * @throws IOException + */ + private void postRollBackMergeRegions(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.postRollBackMergeRegionsAction(regionsToMerge, getUser()); + } + } + + /** + * Set the region states to MERGING state + * @param env MasterProcedureEnv + * @throws IOException + */ + public void setRegionStateToMerging(final MasterProcedureEnv env) throws IOException { + RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); + transition.setTransitionCode(TransitionCode.READY_TO_MERGE); + transition.addRegionInfo(HRegionInfo.convert(mergedRegionInfo)); + transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[0])); + transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[1])); + if (env.getMasterServices().getAssignmentManager().onRegionTransition( + getServerName(env), transition.build()) != null) { + throw new IOException("Failed to update region state to MERGING for " + + getRegionsToMergeListFullNameString()); + } + } + + /** + * Rollback the region state change + * @param env MasterProcedureEnv + * @throws IOException + */ + private void setRegionStateToRevertMerging(final MasterProcedureEnv env) throws IOException { + RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); + transition.setTransitionCode(TransitionCode.MERGE_REVERTED); + transition.addRegionInfo(HRegionInfo.convert(mergedRegionInfo)); + transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[0])); + transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[1])); + String msg = env.getMasterServices().getAssignmentManager().onRegionTransition( + getServerName(env), transition.build()); + if (msg != null) { + // If daughter regions are online, the msg is coming from RPC retry. Ignore it. + RegionStates regionStates = getAssignmentManager(env).getRegionStates(); + if (!regionStates.isRegionOnline(regionsToMerge[0]) || + !regionStates.isRegionOnline(regionsToMerge[1])) { + throw new IOException("Failed to update region state for " + + getRegionsToMergeListFullNameString() + + " as part of operation for reverting merge. 
Error message: " + msg); + } + } + } + + /** + * Create merged region + * @param env MasterProcedureEnv + * @throws IOException + */ + private void createMergedRegion(final MasterProcedureEnv env) throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable()); + final FileSystem fs = mfs.getFileSystem(); + HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( + env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false); + regionFs.createMergesDir(); + + mergeStoreFiles(env, regionFs, regionFs.getMergesDir()); + HRegionFileSystem regionFs2 = HRegionFileSystem.openRegionFromFileSystem( + env.getMasterConfiguration(), fs, tabledir, regionsToMerge[1], false); + mergeStoreFiles(env, regionFs2, regionFs.getMergesDir()); + + regionFs.commitMergedRegion(mergedRegionInfo); + } + + /** + * Create reference file(s) of merging regions under the merges directory + * @param env MasterProcedureEnv + * @param regionFs region file system + * @param mergedDir the temp directory of merged region + * @throws IOException + */ + private void mergeStoreFiles( + final MasterProcedureEnv env, final HRegionFileSystem regionFs, final Path mergedDir) + throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Configuration conf = env.getMasterConfiguration(); + final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + + for (String family: regionFs.getFamilies()) { + final HColumnDescriptor hcd = htd.getFamily(family.getBytes()); + final Collection storeFiles = regionFs.getStoreFiles(family); + + if (storeFiles != null && storeFiles.size() > 0) { + final CacheConfig cacheConf = new CacheConfig(conf, hcd); + for (StoreFileInfo storeFileInfo: storeFiles) { + // Create reference file(s) of the region in mergedDir + regionFs.mergeStoreFile( + mergedRegionInfo, + family, + new StoreFile( + mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()), + mergedDir); + } + } + } + } + + /** + * Clean up merged region + * @param env MasterProcedureEnv + * @throws IOException + */ + private void cleanupMergedRegion(final MasterProcedureEnv env) throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable()); + final FileSystem fs = mfs.getFileSystem(); + HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( + env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false); + regionFs.cleanupMergedRegion(mergedRegionInfo); + } + + /** + * RPC to region server that host the regions to merge, ask for close these regions + * @param env MasterProcedureEnv + * @throws IOException + */ + private void closeRegionsForMerge(final MasterProcedureEnv env) throws IOException { + boolean success = env.getMasterServices().getServerManager().sendRegionCloseForSplitOrMerge( + getServerName(env), regionsToMerge[0], regionsToMerge[1]); + if (!success) { + throw new IOException("Close regions " + getRegionsToMergeListFullNameString() + + " for merging failed. 
Check the region server log for more details."); + } + } + + /** + * Rollback the close of the regions + * @param env MasterProcedureEnv + **/ + private void rollbackCloseRegionsForMerge(final MasterProcedureEnv env) throws IOException { + // Check whether each region is closed; if so, open it in the same server + RegionStates regionStates = getAssignmentManager(env).getRegionStates(); + for (int i = 0; i < regionsToMerge.length; i++) { + RegionState state = regionStates.getRegionState(regionsToMerge[i]); + if (state != null && (state.isClosing() || state.isClosed())) { + env.getMasterServices().getServerManager().sendRegionOpen( + getServerName(env), + regionsToMerge[i], + ServerName.EMPTY_SERVER_LIST); + } + } + } + + /** + * Pre merge regions commit action + * @param env MasterProcedureEnv + **/ + private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + @MetaMutationAnnotation + final List metaEntries = new ArrayList(); + boolean ret = cpHost.preMergeRegionsCommit(regionsToMerge, metaEntries, getUser()); + + if (ret) { + throw new IOException( + "Coprocessor bypassing regions " + getRegionsToMergeListFullNameString() + " merge."); + } + try { + for (Mutation p : metaEntries) { + HRegionInfo.parseRegionName(p.getRow()); + } + } catch (IOException e) { + LOG.error("Row key of mutation from coprocessor is not parsable as region name. " + + "Mutations from coprocessor should only be for the hbase:meta table.", e); + throw e; + } + } + } + + /** + * Add the merged region to META and delete the original regions. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void updateMetaForMergedRegions(final MasterProcedureEnv env) throws IOException { + RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); + transition.setTransitionCode(TransitionCode.MERGE_PONR); + transition.addRegionInfo(HRegionInfo.convert(mergedRegionInfo)); + transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[0])); + transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[1])); + // Add the merged region and delete the original regions + // as an atomic update. See HBASE-7721. This update to hbase:meta is what + // determines whether the region is merged or not in case of failures. + if (env.getMasterServices().getAssignmentManager().onRegionTransition( + getServerName(env), transition.build()) != null) { + throw new IOException("Failed to update meta to add merged region that merges " + + getRegionsToMergeListFullNameString()); + } + } + + /** + * Post merge regions commit action + * @param env MasterProcedureEnv + **/ + private void postMergeRegionsCommit(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.postMergeRegionsCommit(regionsToMerge, mergedRegionInfo, getUser()); + } + } + + /** + * Assign the merged region + * @param env MasterProcedureEnv + * @throws IOException + * @throws InterruptedException + **/ + private void openMergedRegions(final MasterProcedureEnv env) + throws IOException, InterruptedException { + // Check whether the merged region is already opened; if so, + // this is a retry and we should just ignore it. 
+ RegionState regionState = + getAssignmentManager(env).getRegionStates().getRegionState(mergedRegionInfo); + if (regionState != null && regionState.isOpened()) { + LOG.info("Skip opening merged region " + mergedRegionInfo.getRegionNameAsString() + + " as it is already opened."); + return; + } + + // TODO: The new AM should provide an API to force assign the merged region to the same RS + // as the daughter regions; if the RS is unavailable, then assign to a different RS. + env.getMasterServices().getAssignmentManager().assignMergedRegion( + mergedRegionInfo, regionsToMerge[0], regionsToMerge[1]); + } + + /** + * Post completed merge regions action + * @param env MasterProcedureEnv + **/ + private void postCompletedMergeRegions(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.postCompletedMergeRegionsAction(regionsToMerge, mergedRegionInfo, getUser()); + } + } + + private RegionLoad getRegionLoad( + final MasterProcedureEnv env, + final ServerName sn, + final HRegionInfo hri) { + ServerManager serverManager = env.getMasterServices().getServerManager(); + ServerLoad load = serverManager.getLoad(sn); + if (load != null) { + Map regionsLoad = load.getRegionsLoad(); + if (regionsLoad != null) { + return regionsLoad.get(hri.getRegionName()); + } + } + return null; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @param env MasterProcedureEnv + * @return whether the target regions are hosted by the same RS + */ + private boolean isRegionsOnTheSameServer(final MasterProcedureEnv env) throws IOException { + boolean onSameRS = true; + int i = 0; + RegionStates regionStates = getAssignmentManager(env).getRegionStates(); + regionLocation = regionStates.getRegionServerOfRegion(regionsToMerge[i]); + if (regionLocation != null) { + for (i = 1; i < regionsToMerge.length; i++) { + ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[i]); + if (regionLocation2 != null) { + if (onSameRS) { + onSameRS = regionLocation.equals(regionLocation2); + } + } else { + // At least one region is not online, the merge will fail, no need to continue. + break; + } + } + if (i == regionsToMerge.length) { + // Finished checking all regions; return the result. + return onSameRS; + } + } + + // If we reach here, at least one region is not online. + String msg = "Skip merging regions " + getRegionsToMergeListFullNameString() + + ", because region " + regionsToMerge[i].getEncodedName() + " is not online now."; + LOG.warn(msg); + throw new IOException(msg); + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @param env MasterProcedureEnv + * @return assignmentManager + */ + private AssignmentManager getAssignmentManager(final MasterProcedureEnv env) { + if (assignmentManager == null) { + assignmentManager = env.getMasterServices().getAssignmentManager(); + } + return assignmentManager; + }
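The helper that follows resolves the merge timeout from configuration; a standalone sketch of that lookup under the default of one minute per region being merged (illustrative, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class MergeTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Two regions to merge -> a two-minute default, overridable via the key below.
    int timeoutMs = conf.getInt("hbase.master.regionmerge.timeout", 2 * 60 * 1000);
    System.out.println("merge timeout (ms): " + timeoutMs);
  }
}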
+ + /** + * The procedure could be restarted from a different machine. If the variable is not set, we + * need to retrieve it. + * @param env MasterProcedureEnv + * @return timeout value + */ + private int getTimeout(final MasterProcedureEnv env) { + if (timeout == -1) { + timeout = env.getMasterConfiguration().getInt( + "hbase.master.regionmerge.timeout", regionsToMerge.length * 60 * 1000); + } + return timeout; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @param env MasterProcedureEnv + * @return serverName + */ + private ServerName getServerName(final MasterProcedureEnv env) { + if (regionLocation == null) { + regionLocation = + getAssignmentManager(env).getRegionStates().getRegionServerOfRegion(regionsToMerge[0]); + } + return regionLocation; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return full region names in a list + */ + private String getRegionsToMergeListFullNameString() { + if (regionsToMergeListFullName == null) { + StringBuilder sb = new StringBuilder("["); + int i = 0; + while (i < regionsToMerge.length - 1) { + sb.append(regionsToMerge[i].getRegionNameAsString() + ", "); + i++; + } + sb.append(regionsToMerge[i].getRegionNameAsString() + " ]"); + regionsToMergeListFullName = sb.toString(); + } + return regionsToMergeListFullName; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return encoded region names + */ + private String getRegionsToMergeListEncodedNameString() { + if (regionsToMergeListEncodedName == null) { + StringBuilder sb = new StringBuilder("["); + int i = 0; + while (i < regionsToMerge.length - 1) { + sb.append(regionsToMerge[i].getEncodedName() + ", "); + i++; + } + sb.append(regionsToMerge[i].getEncodedName() + " ]"); + regionsToMergeListEncodedName = sb.toString(); + } + return regionsToMergeListEncodedName; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java index 883ac9a..4730ad8 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java @@ -161,9 +161,9 @@ public class SplitTableRegionProcedure break; case SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE: setRegionStateToSplitting(env); - setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CLOSED_PARENT_REGION); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CLOSE_PARENT_REGION); break; - case SPLIT_TABLE_REGION_CLOSED_PARENT_REGION: + case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION: closeParentRegionForSplit(env); setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS); break; @@ -242,14 +242,14 @@ public class SplitTableRegionProcedure case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS: // Doing nothing, as re-opening the parent region would clean up the daughter region directories. 
break; - case SPLIT_TABLE_REGION_CLOSED_PARENT_REGION: + case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION: openParentRegion(env); break; case SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE: setRegionStateToRevertSplitting(env); break; case SPLIT_TABLE_REGION_PRE_OPERATION: - preSplitRegionRollback(env); + postRollBackSplitRegion(env); break; case SPLIT_TABLE_REGION_PREPARE: break; // nothing to do @@ -408,15 +408,14 @@ public class SplitTableRegionProcedure } /** - * Action during rollback a pre split table region. + * Action after rollback a split table region action. * @param env MasterProcedureEnv - * @param state the procedure state * @throws IOException */ - private void preSplitRegionRollback(final MasterProcedureEnv env) throws IOException { + private void postRollBackSplitRegion(final MasterProcedureEnv env) throws IOException { final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { - cpHost.preRollBackSplitAction(getUser()); + cpHost.postRollBackSplitRegionAction(getUser()); } } @@ -458,14 +457,13 @@ public class SplitTableRegionProcedure } /** - * RPC to region server that host the parent region, ask for close the parent regions and - * creating daughter regions + * RPC to region server that host the parent region, ask for close the parent regions * @param env MasterProcedureEnv * @throws IOException */ @VisibleForTesting public void closeParentRegionForSplit(final MasterProcedureEnv env) throws IOException { - boolean success = env.getMasterServices().getServerManager().sendRegionCloseForSplit( + boolean success = env.getMasterServices().getServerManager().sendRegionCloseForSplitOrMerge( getParentRegionState(env).getServerName(), parentHRI); if (!success) { throw new IOException("Close parent region " + parentHRI + " for splitting failed." diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 50382a4..d4e80c3 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -647,7 +647,7 @@ public class HRegionFileSystem { // Merge Helpers // =========================================================================== /** @return {@link Path} to the temp directory used during merge operations */ - Path getMergesDir() { + public Path getMergesDir() { return new Path(getRegionDir(), REGION_MERGES_DIR); } @@ -667,7 +667,7 @@ public class HRegionFileSystem { * @param mergedRegion {@link HRegionInfo} * @throws IOException */ - void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException { + public void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException { Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName()); if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) { throw new IOException("Failed delete of " + regionDir); @@ -679,7 +679,7 @@ public class HRegionFileSystem { * @throws IOException If merges dir already exists or we fail to create it. * @see HRegionFileSystem#cleanupMergesDir() */ - void createMergesDir() throws IOException { + public void createMergesDir() throws IOException { Path mergesdir = getMergesDir(); if (fs.exists(mergesdir)) { LOG.info("The " + mergesdir @@ -703,7 +703,7 @@ public class HRegionFileSystem { * @return Path to created reference. 
* @throws IOException */ - Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName, + public Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName, final StoreFile f, final Path mergedDir) throws IOException { Path referenceDir = new Path(new Path(mergedDir, @@ -728,7 +728,7 @@ public class HRegionFileSystem { * @param mergedRegionInfo merged region {@link HRegionInfo} * @throws IOException */ - void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException { + public void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException { Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName()); Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo); // Move the tmp dir in the expected location diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 56fc6eb..3e4a23e 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -48,7 +48,6 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantReadWriteLock; import javax.management.MalformedObjectNameException; @@ -3026,54 +3025,58 @@ public class HRegionServer extends HasThread implements } /** - * Close and offline the region for split + * Close and offline the region for split or merge * - * @param parentRegionEncodedName the name of the region to close - * @return True if closed the region successfully. + * @param regionEncodedName the name of the region(s) to close + * @return true if closed the region successfully. 
* @throws IOException */ - protected boolean closeAndOfflineRegionForSplit( - final String parentRegionEncodedName) throws IOException { - Region parentRegion = this.getFromOnlineRegions(parentRegionEncodedName); - if (parentRegion != null) { - Map> hstoreFilesToSplit = null; - Exception exceptionToThrow = null; - try{ - hstoreFilesToSplit = ((HRegion)parentRegion).close(false); - } catch (Exception e) { - exceptionToThrow = e; - } - if (exceptionToThrow == null && hstoreFilesToSplit == null) { - // The region was closed by someone else - exceptionToThrow = - new IOException("Failed to close region: already closed by another thread"); - } + protected boolean closeAndOfflineRegionForSplitOrMerge( + final List regionEncodedName) throws IOException { + for (int i = 0; i < regionEncodedName.size(); ++i) { + Region regionToClose = this.getFromOnlineRegions(regionEncodedName.get(i)); + if (regionToClose != null) { + Map> hstoreFiles = null; + Exception exceptionToThrow = null; + try{ + hstoreFiles = ((HRegion)regionToClose).close(false); + } catch (Exception e) { + exceptionToThrow = e; + } + if (exceptionToThrow == null && hstoreFiles == null) { + // The region was closed by someone else + exceptionToThrow = + new IOException("Failed to close region: already closed by another thread"); + } - if (exceptionToThrow != null) { - if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow; - throw new IOException(exceptionToThrow); - } - if (parentRegion.getTableDesc().hasSerialReplicationScope()) { - // For serial replication, we need add a final barrier on this region. But the splitting may - // be reverted, so we should make sure if we reopen this region, the open barrier is same as - // this final barrier - long seq = parentRegion.getMaxFlushedSeqId(); - if (seq == HConstants.NO_SEQNUM) { - // No edits in WAL for this region; get the sequence number when the region was opened. - seq = parentRegion.getOpenSeqNum(); + if (exceptionToThrow != null) { + if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow; + throw new IOException(exceptionToThrow); + } + if (regionToClose.getTableDesc().hasSerialReplicationScope()) { + // For serial replication, we need add a final barrier on this region. But the splitting + // or merging may be reverted, so we should make sure if we reopen this region, the open + // barrier is same as this final barrier + long seq = regionToClose.getMaxFlushedSeqId(); if (seq == HConstants.NO_SEQNUM) { - // This region has no data - seq = 0; + // No edits in WAL for this region; get the sequence number when the region was opened. 
+            seq = regionToClose.getOpenSeqNum();
+            if (seq == HConstants.NO_SEQNUM) {
+              // This region has no data
+              seq = 0;
+            }
+          } else {
+            seq++;
           }
-        } else {
-          seq++;
+          Put finalBarrier = MetaTableAccessor.makeBarrierPut(
+            Bytes.toBytes(regionEncodedNames.get(i)),
+            seq,
+            regionToClose.getTableDesc().getTableName().getName());
+          MetaTableAccessor.putToMetaTable(getConnection(), finalBarrier);
         }
-        Put finalBarrier = MetaTableAccessor.makeBarrierPut(Bytes.toBytes(parentRegionEncodedName),
-          seq, parentRegion.getTableDesc().getTableName().getName());
-        MetaTableAccessor.putToMetaTable(getConnection(), finalBarrier);
+        // Offline the region
+        this.removeFromOnlineRegions(regionToClose, null);
       }
-      // Offline the region
-      this.removeFromOnlineRegions(parentRegion, null);
     }
     return true;
   }
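The sequence-number selection above is easy to misread in diff form. Restated as a small pure function, a sketch for the reader rather than code from this patch:

    import org.apache.hadoop.hbase.HConstants;

    // Distills the barrier sequence chosen when closing a region with serial
    // replication scope: one past the last flushed edit if there is one, else
    // the open sequence number, else 0 for a region that never had data.
    final class BarrierSeqSketch {
      static long finalBarrierSeq(long maxFlushedSeqId, long openSeqNum) {
        if (maxFlushedSeqId != HConstants.NO_SEQNUM) {
          return maxFlushedSeqId + 1;
        }
        return openSeqNum != HConstants.NO_SEQNUM ? openSeqNum : 0;
      }
    }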
and offline " + encodedRegionName + " and prepare for split."); - boolean closed = regionServer.closeAndOfflineRegionForSplit(encodedRegionName); - CloseRegionForSplitResponse.Builder builder = - CloseRegionForSplitResponse.newBuilder().setClosed(closed); + LOG.info("Close and offline " + encodedRegionNameList + " regions."); + boolean closed = regionServer.closeAndOfflineRegionForSplitOrMerge(encodedRegionNameList); + CloseRegionForSplitOrMergeResponse.Builder builder = + CloseRegionForSplitOrMergeResponse.newBuilder().setClosed(closed); return builder.build(); } catch (IOException ie) { throw new ServiceException(ie); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index 2af65a4..65cedda 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -30,7 +30,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -1180,10 +1179,10 @@ public class TestAdmin1 { gotException = false; // Try merging a replica with another. Should fail. try { - // TODO convert this to version that is synchronous (See HBASE-16668) - TEST_UTIL.getAdmin().mergeRegionsAsync(regions.get(1).getFirst().getEncodedNameAsBytes(), - regions.get(2).getFirst().getEncodedNameAsBytes(), true) - .get(60, TimeUnit.SECONDS); + TEST_UTIL.getHBaseAdmin().mergeRegionsSync( + regions.get(1).getFirst().getEncodedNameAsBytes(), + regions.get(2).getFirst().getEncodedNameAsBytes(), + true); } catch (IllegalArgumentException m) { gotException = true; } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 5497a3f..7522e85 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -175,6 +175,8 @@ public class TestMasterObserver { private boolean preGetTableNamesCalled; private boolean preDispatchMergeCalled; private boolean postDispatchMergeCalled; + private boolean preMergeRegionsCalled; + private boolean postMergeRegionsCalled; public void enableBypass(boolean bypass) { this.bypass = bypass; @@ -261,6 +263,26 @@ public class TestMasterObserver { preGetTableNamesCalled = false; preDispatchMergeCalled = false; postDispatchMergeCalled = false; + preMergeRegionsCalled = false; + postMergeRegionsCalled = false; + } + + @Override + public void preMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + preMergeRegionsCalled = true; + } + + @Override + public void postMergeRegions( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + postMergeRegionsCalled = true; + } + + public boolean wasMergeRegionsCalled() { + return preMergeRegionsCalled && postMergeRegionsCalled; } @Override @@ -1512,9 +1534,42 @@ public class TestMasterObserver { } @Override - public void preRollBackSplitRegionAction( + public void postRollBackSplitRegionAction( final ObserverContext ctx) throws IOException { } + + @Override + public void preMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws 
IOException { + } + + @Override + public void postCompletedMergeRegionsAction( + final ObserverContext c, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void preMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final List metaEntries) throws IOException { + } + + @Override + public void postMergeRegionsCommitAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge, + final HRegionInfo mergedRegion) throws IOException { + } + + @Override + public void postRollBackMergeRegionsAction( + final ObserverContext ctx, + final HRegionInfo[] regionsToMerge) throws IOException { + } } private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -1593,7 +1648,7 @@ public class TestMasterObserver { admin.mergeRegionsAsync(regions.get(0).getRegionInfo().getEncodedNameAsBytes(), regions.get(1).getRegionInfo().getEncodedNameAsBytes(), true); assertTrue("Coprocessor should have been called on region merge", - cp.wasDispatchMergeCalled()); + cp.wasMergeRegionsCalled()); tableCreationLatch = new CountDownLatch(1); admin.disableTable(tableName); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java index ecf9da1..4a62bff 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -61,6 +62,7 @@ public class TestRegionServerObserver { * Test verifies the hooks in regions merge. 
   * @throws Exception
   */
+  @Ignore("Region merge is now master-driven; these RegionServerObserver merge hooks no longer fire")
  @Test
  public void testCoprocessorHooksInRegionsMerge() throws Exception {
    final int NUM_MASTERS = 1;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 2630068..b52f5df 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -287,6 +287,15 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
+  public long mergeRegions(
+      final HRegionInfo[] regionsToMerge,
+      final boolean forcible,
+      final long nonceGroup,
+      final long nonce) throws IOException {
+    return -1;
+  }
+
+  @Override
   public long splitRegion(
       final HRegionInfo regionInfo,
       final byte[] splitRow,
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 1594b6d..ec8054e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -51,8 +51,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
@@ -500,9 +500,9 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public CloseRegionForSplitResponse closeRegionForSplit(
+  public CloseRegionForSplitOrMergeResponse closeRegionForSplitOrMerge(
       RpcController controller,
-      CloseRegionForSplitRequest request) throws ServiceException {
+      CloseRegionForSplitOrMergeRequest request) throws ServiceException {
     return null;
   }
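MockNoopMasterServices stubs the new MasterServices.mergeRegions with -1, i.e. no procedure submitted. For orientation, a real implementation is expected to submit a MergeTableRegionsProcedure and return its procedure id, mirroring how the tests later in this patch drive the executor directly. A sketch under that assumption — the procedureExecutor field and the surrounding class are invented here, this is not HMaster code from the patch:

    // Sketch: the expected shape of a non-noop MasterServices.mergeRegions.
    @Override
    public long mergeRegions(final HRegionInfo[] regionsToMerge, final boolean forcible,
        final long nonceGroup, final long nonce) throws IOException {
      // Submit the merge procedure with the caller's nonce for idempotent retries,
      // and hand the procedure id back to the client.
      return procedureExecutor.submitProcedure(
        new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), regionsToMerge, forcible),
        nonceGroup, nonce);
    }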
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
new file mode 100644
index 0000000..1915f69
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestMergeTableRegionsProcedure {
+  private static final Log LOG = LogFactory.getLog(TestMergeTableRegionsProcedure.class);
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static long nonceGroup = HConstants.NO_NONCE;
+  private static long nonce = HConstants.NO_NONCE;
+
+  private static final int initialRegionCount = 4;
+  private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
+  final static Configuration conf = UTIL.getConfiguration();
+  private static Admin admin;
+
+  private static void setupConf(Configuration conf) {
+    // Reduce the maximum attempts to speed up the test
+    conf.setInt("hbase.assignment.maximum.attempts", 3);
+    conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
+    conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);
+
+    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(conf);
+    UTIL.startMiniCluster(1);
+    admin = UTIL.getHBaseAdmin();
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    resetProcExecutorTestingKillFlag();
+    nonceGroup =
+      MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
+    nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
+    // Turn off balancer so it doesn't cut in and mess up our placements.
+    UTIL.getHBaseAdmin().setBalancerRunning(false, true);
+    // Turn off the meta scanner so it doesn't remove parent regions on us.
+    UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false);
+    resetProcExecutorTestingKillFlag();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    resetProcExecutorTestingKillFlag();
+    for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+      LOG.info("Tear down, remove table=" + htd.getTableName());
+      UTIL.deleteTable(htd.getTableName());
+    }
+  }
+
+  private void resetProcExecutorTestingKillFlag() {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    assertTrue("expected executor to be running", procExec.isRunning());
+  }
+
+  /**
+   * Tests merging two regions.
+   */
+  @Test(timeout=60000)
+  public void testMergeTwoRegions() throws Exception {
+    final TableName tableName = TableName.valueOf("testMergeTwoRegions");
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    List<HRegionInfo> tableRegions = createTable(tableName);
+
+    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
+    regionsToMerge[0] = tableRegions.get(0);
+    regionsToMerge[1] = tableRegions.get(1);
+
+    long procId = procExec.submitProcedure(new MergeTableRegionsProcedure(
+      procExec.getEnvironment(), regionsToMerge, true));
+    ProcedureTestingUtility.waitProcedure(procExec, procId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+    assertRegionCount(tableName, initialRegionCount - 1);
+  }
+
+  /**
+   * Tests two concurrent region merges.
+   */
+  @Test(timeout=60000)
+  public void testMergeRegionsConcurrently() throws Exception {
+    final TableName tableName = TableName.valueOf("testMergeRegionsConcurrently");
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    List<HRegionInfo> tableRegions = createTable(tableName);
+
+    HRegionInfo[] regionsToMerge1 = new HRegionInfo[2];
+    HRegionInfo[] regionsToMerge2 = new HRegionInfo[2];
+    regionsToMerge1[0] = tableRegions.get(0);
+    regionsToMerge1[1] = tableRegions.get(1);
+    regionsToMerge2[0] = tableRegions.get(2);
+    regionsToMerge2[1] = tableRegions.get(3);
+
+    long procId1 = procExec.submitProcedure(new MergeTableRegionsProcedure(
+      procExec.getEnvironment(), regionsToMerge1, true));
+    long procId2 = procExec.submitProcedure(new MergeTableRegionsProcedure(
+      procExec.getEnvironment(), regionsToMerge2, true));
+    ProcedureTestingUtility.waitProcedure(procExec, procId1);
+    ProcedureTestingUtility.waitProcedure(procExec, procId2);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+    assertRegionCount(tableName, initialRegionCount - 2);
+  }
+
+  @Test(timeout=60000)
+  public void testMergeRegionsTwiceWithSameNonce() throws Exception {
+    final TableName tableName = TableName.valueOf("testMergeRegionsTwiceWithSameNonce");
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    List<HRegionInfo> tableRegions = createTable(tableName);
+
+    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
+    regionsToMerge[0] = tableRegions.get(0);
+    regionsToMerge[1] = tableRegions.get(1);
+
+    long procId1 = procExec.submitProcedure(new MergeTableRegionsProcedure(
+      procExec.getEnvironment(), regionsToMerge, true), nonceGroup, nonce);
+    long procId2 = procExec.submitProcedure(new MergeTableRegionsProcedure(
+      procExec.getEnvironment(), regionsToMerge, true), nonceGroup, nonce);
+    assertEquals(procId1, procId2);
+
+    ProcedureTestingUtility.waitProcedure(procExec, procId1);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+    // The second submission should succeed too: with the same nonce it is the same procedure.
+    ProcedureTestingUtility.waitProcedure(procExec, procId2);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+
+    assertRegionCount(tableName, initialRegionCount - 1);
+  }
+
+  @Test(timeout=60000)
+  public void testRecoveryAndDoubleExecution() throws Exception {
+    final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    List<HRegionInfo> tableRegions = createTable(tableName);
+
+    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
+    regionsToMerge[0] = tableRegions.get(0);
+    regionsToMerge[1] = tableRegions.get(1);
+
+    long procId = procExec.submitProcedure(
+      new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
+
+    // Restart the executor and execute the step twice
+    int numberOfSteps = MergeTableRegionsState.values().length;
+    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+    assertRegionCount(tableName, initialRegionCount - 1);
+  }
+
+  @Test(timeout = 60000)
+  public void testRollbackAndDoubleExecution() throws Exception {
+    final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    List<HRegionInfo> tableRegions = createTable(tableName);
+
+    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
+    regionsToMerge[0] = tableRegions.get(0);
+    regionsToMerge[1] = tableRegions.get(1);
+
+    long procId = procExec.submitProcedure(
+      new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
+
+    // Failing before MERGE_TABLE_REGIONS_UPDATE_META should trigger the rollback.
+    // NOTE: the 6 (the number of steps before MERGE_TABLE_REGIONS_UPDATE_META) is
+    // hardcoded, so revisit this test whenever a new step is added.
+    int numberOfSteps = 6;
+    MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);
+  }
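The hardcoded 6 could instead be derived from the generated state enum. Assuming the MergeTableRegionsState values are declared in execution order (the recovery test above already leans on this by using values().length as the step count), the following one-liner would keep the rollback test in sync automatically — a sketch, not part of the patch:

    // Sketch: the index of MERGE_TABLE_REGIONS_UPDATE_META in declaration
    // order equals the number of steps that run before it.
    int numberOfSteps = MergeTableRegionsState.MERGE_TABLE_REGIONS_UPDATE_META.ordinal();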
+
+  private List<HRegionInfo> createTable(final TableName tableName)
+      throws Exception {
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addFamily(new HColumnDescriptor(FAMILY));
+    byte[][] splitRows = new byte[initialRegionCount - 1][];
+    for (int i = 0; i < splitRows.length; ++i) {
+      splitRows[i] = Bytes.toBytes(String.format("%d", i));
+    }
+    admin.createTable(desc, splitRows);
+    return assertRegionCount(tableName, initialRegionCount);
+  }
+
+  public List<HRegionInfo> assertRegionCount(final TableName tableName, final int nregions)
+      throws Exception {
+    UTIL.waitUntilNoRegionsInTransition();
+    List<HRegionInfo> tableRegions = admin.getTableRegions(tableName);
+    assertEquals(nregions, tableRegions.size());
+    return tableRegions;
+  }
+
+  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+  }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java
index fcce0fb..55e38ba 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.CompactionState;
@@ -311,8 +310,6 @@
     HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
       procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
     insertData(tableName);
-    int splitRowNum = startRowNum + rowCount / 2;
-    byte[] splitKey = Bytes.toBytes("" + splitRowNum);
 
     assertTrue("not able to find a splittable region", regions != null);
     assertTrue("not able to find a splittable region", regions.length == 1);
@@ -330,7 +327,7 @@
     }
   }
 
-  @Test(timeout = 600000)
+  @Test(timeout = 60000)
   public void testRollbackAndDoubleExecution() throws Exception {
     final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
     final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
index a25c157..f8a1a93 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.TableNamespaceManager;
@@ -73,9 +74,7 @@
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -104,11 +103,11 @@ public class TestNamespaceAuditor {
 
   @BeforeClass
   public static void before() throws Exception {
-    UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
-      CustomObserver.class.getName());
-    UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
-      MasterSyncObserver.class.getName());
     Configuration conf = UTIL.getConfiguration();
+    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
+    conf.setStrings(
+      CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
+      MasterSyncObserver.class.getName(), CPMasterObserver.class.getName());
     conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
     conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
     conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class,
@@ -309,6 +308,33 @@
     }
   }
 
+  public static class CPMasterObserver extends BaseMasterObserver {
+    private volatile boolean shouldFailMerge = false;
+
+    public void failMerge(boolean fail) {
+      shouldFailMerge = fail;
+    }
+
+    private boolean triggered = false;
+
+    public synchronized void waitUntilTriggered() throws InterruptedException {
+      while (!triggered) {
+        wait();
+      }
+    }
+
+    @Override
+    public synchronized void preMergeRegionsAction(
+        final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final HRegionInfo[] regionsToMerge) throws IOException {
+      triggered = true;
+      notifyAll();
+      if (shouldFailMerge) {
+        throw new IOException("fail merge");
+      }
+    }
+  }
+
   @Test
   public void testRegionMerge() throws Exception {
     String nsp1 = prefix + "_regiontest";
@@ -414,18 +440,17 @@
 
     // fail region merge through Coprocessor hook
     MiniHBaseCluster cluster = UTIL.getHBaseCluster();
-    HRegionServer regionServer = cluster.getRegionServer(0);
-    RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost();
-    Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName());
-    CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor;
-    regionServerObserver.failMerge(true);
-    regionServerObserver.triggered = false;
+    MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
+    Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class.getName());
+    CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
+    masterObserver.failMerge(true);
+    masterObserver.triggered = false;
 
     ADMIN.mergeRegionsAsync(
       hris.get(1).getEncodedNameAsBytes(),
       hris.get(2).getEncodedNameAsBytes(),
       false);
-    regionServerObserver.waitUtilTriggered();
+    masterObserver.waitUntilTriggered();
     hris = ADMIN.getTableRegions(tableTwo);
     assertEquals(initialRegions, hris.size());
     Collections.sort(hris);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
index 2cb4ecc..6fcccaf 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
@@ -55,12 +55,10 @@ import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 @Category({ ReplicationTests.class, LargeTests.class })
 public class TestSerialReplication {
@@ -270,7 +268,7 @@
     }
     List<Pair<HRegionInfo, ServerName>> regions =
       MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), tableName);
-    utility1.getHBaseAdmin().mergeRegions(regions.get(0).getFirst().getRegionName(),
+    utility1.getHBaseAdmin().mergeRegionsAsync(regions.get(0).getFirst().getRegionName(),
       regions.get(1).getFirst().getRegionName(), true);
     waitTableHasRightNumberOfRegions(tableName, 1);
     for (int i = 11; i < 100; i += 10) {