diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 51d07e3..f4008fd 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -165,7 +165,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 53101de..94efa37 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -104,6 +104,8 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -1720,6 +1722,33 @@ public final class ProtobufUtil {
}
/**
+ * A helper to close a region for split using the admin protocol.
+ *
+ * @param controller RPC controller
+ * @param admin Admin service
+ * @param server the RS that hosts the target region
+ * @param parentRegionInfo the target region info
+ * @return true if the region is closed
+ * @throws IOException if a remote or network exception occurs
+ */
+ public static boolean closeRegionForSplit(
+ final RpcController controller,
+ final AdminService.BlockingInterface admin,
+ final ServerName server,
+ final HRegionInfo parentRegionInfo) throws IOException {
+ CloseRegionForSplitRequest closeRegionForSplitRequest =
+ ProtobufUtil.buildCloseRegionForSplitRequest(server, parentRegionInfo);
+ try {
+ CloseRegionForSplitResponse response =
+ admin.closeRegionForSplit(controller, closeRegionForSplitRequest);
+ return ResponseConverter.isClosed(response);
+ } catch (ServiceException se) {
+ throw getRemoteException(se);
+ }
+ }
+
+ /**
* A helper to warmup a region given a region name
* using admin protocol
*
@@ -3063,6 +3092,23 @@ public final class ProtobufUtil {
}
/**
+ * Create a CloseRegionForSplitRequest for a given region
+ *
+ * @param server the RS server that hosts the region
+ * @param parentRegionInfo the info of the region to close
+ * @return a CloseRegionForSplitRequest
+ */
+ public static CloseRegionForSplitRequest buildCloseRegionForSplitRequest(
+ final ServerName server,
+ final HRegionInfo parentRegionInfo) {
+ CloseRegionForSplitRequest.Builder builder = CloseRegionForSplitRequest.newBuilder();
+ RegionSpecifier parentRegion = RequestConverter.buildRegionSpecifier(
+ RegionSpecifierType.REGION_NAME, parentRegionInfo.getRegionName());
+ builder.setRegion(parentRegion);
+ return builder.build();
+ }
+
+ /**
* Create a CloseRegionRequest for a given encoded region name
*
* @param encodedRegionName the name of the region to close
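The new ProtobufUtil helper is meant to be driven from master-side split handling. Below is a minimal caller-side sketch; the AdminService stub, the controller, and the surrounding class are assumptions for illustration only and are not part of this patch.

// Illustrative only: invoking the new ProtobufUtil.closeRegionForSplit helper.
// Obtaining the AdminService stub for the hosting region server is assumed to come
// from the cluster connection and is out of scope here.
import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;

public class CloseForSplitSketch {
  /** Asks the region server hosting the parent region to close it ahead of a split. */
  static boolean closeParentForSplit(AdminService.BlockingInterface admin,
      RpcController controller, ServerName server, HRegionInfo parent) throws IOException {
    // The helper builds a CloseRegionForSplitRequest for the parent region, issues the
    // CloseRegionForSplit RPC, unwraps ServiceException into IOException, and reports
    // whether the server answered closed=true.
    return ProtobufUtil.closeRegionForSplit(controller, admin, server, parent);
  }
}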
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 7da3727..abd1563 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -106,6 +106,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOr
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
@@ -850,7 +851,7 @@ public final class RequestConverter {
return ubuilder.build();
}
- /**
+ /**
* Create a WarmupRegionRequest for a given region name
*
* @param regionInfo Region we are warming up
@@ -1061,6 +1062,19 @@ public final class RequestConverter {
return builder.build();
}
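+ /**
+ * Create a protocol buffer SplitTableRegionRequest for splitting the given region
+ * at the given split row.
+ *
+ * @param regionInfo the region to split
+ * @param splitPoint the split row
+ * @param nonceGroup the nonce group for this request
+ * @param nonce the nonce for this request
+ * @return a SplitTableRegionRequest
+ */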
+ public static SplitTableRegionRequest buildSplitTableRegionRequest(
+ final HRegionInfo regionInfo,
+ final byte[] splitPoint,
+ final long nonceGroup,
+ final long nonce) {
+ SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder();
+ builder.setRegionInfo(HRegionInfo.convert(regionInfo));
+ builder.setSplitRow(UnsafeByteOperations.unsafeWrap(splitPoint));
+ builder.setNonceGroup(nonceGroup);
+ builder.setNonce(nonce);
+ return builder.build();
+ }
+
/**
* Create a protocol buffer AssignRegionRequest
*
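buildSplitTableRegionRequest carries a nonce group and nonce, following the pattern of other nonce-carrying master requests, so a retried split can be recognized instead of executed twice. A small sketch of how a client might assemble the request; the NonceGenerator wiring and the split row value are illustrative assumptions, not part of this patch.

// Illustrative sketch of building the new SplitTableRegionRequest on the client side.
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.NonceGenerator;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRequestSketch {
  static SplitTableRegionRequest buildRequest(HRegionInfo regionToSplit, NonceGenerator ng) {
    byte[] splitRow = Bytes.toBytes("row-5000");   // hypothetical split point
    return RequestConverter.buildSplitTableRegionRequest(
        regionToSplit,
        splitRow,
        ng.getNonceGroup(),  // identifies the requesting client
        ng.newNonce());      // lets the master deduplicate retried split requests
  }
}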
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index dc7b95d..11fc931 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.SingleResponse;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
@@ -252,6 +253,18 @@ public final class ResponseConverter {
}
/**
+ * Check if the region is closed from a CloseRegionForSplitResponse
+ *
+ * @param proto the CloseRegionForSplitResponse
+ * @return true if the region is closed, false otherwise
+ */
+ public static boolean isClosed(final CloseRegionForSplitResponse proto) {
+ if (proto == null || !proto.hasClosed()) return false;
+ return proto.getClosed();
+ }
+
+ /**
* A utility to build a GetServerInfoResponse.
*
* @param serverName
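isClosed is deliberately defensive: a missing response, or one without the required closed field, is reported as not closed. A short sketch exercising that behavior; the responses here are built locally purely for illustration.

// Illustrative only: what ResponseConverter.isClosed() returns for different responses.
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse;

public class IsClosedSketch {
  public static void main(String[] args) {
    CloseRegionForSplitResponse closed =
        CloseRegionForSplitResponse.newBuilder().setClosed(true).build();
    CloseRegionForSplitResponse notClosed =
        CloseRegionForSplitResponse.newBuilder().setClosed(false).build();

    System.out.println(ResponseConverter.isClosed(closed));     // true
    System.out.println(ResponseConverter.isClosed(notClosed));  // false
    // A null response is treated the same as "not closed".
    System.out.println(ResponseConverter.isClosed(null));       // false
  }
}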
diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 20020d4..b4e46b0 100644
--- hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -9414,6 +9414,1084 @@ public final class AdminProtos {
}
+ public interface CloseRegionForSplitRequestOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitRequest)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ boolean hasRegion();
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
+ }
+ /**
+ *
+ ** + * Closes the specified region and create + * child region. + *+ * + * Protobuf type {@code hbase.pb.CloseRegionForSplitRequest} + */ + public static final class CloseRegionForSplitRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitRequest) + CloseRegionForSplitRequestOrBuilder { + // Use CloseRegionForSplitRequest.newBuilder() to construct. + private CloseRegionForSplitRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private CloseRegionForSplitRequest() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CloseRegionForSplitRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.Builder.class); + } + + private int bitField0_; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + *
required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public boolean hasRegion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
+ return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_;
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
+ return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ if (!hasRegion()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegion().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, getRegion());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, getRegion());
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest) obj;
+
+ boolean result = true;
+ result = result && (hasRegion() == other.hasRegion());
+ if (hasRegion()) {
+ result = result && getRegion()
+ .equals(other.getRegion());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasRegion()) {
+ hash = (37 * hash) + REGION_FIELD_NUMBER;
+ hash = (53 * hash) + getRegion().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + ** + * Closes the specified region and create + * child region. + *+ * + * Protobuf type {@code hbase.pb.CloseRegionForSplitRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public boolean hasRegion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
+ if (regionBuilder_ == null) {
+ return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_;
+ } else {
+ return regionBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ region_ = value;
+ onChanged();
+ } else {
+ regionBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public Builder setRegion(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+ if (regionBuilder_ == null) {
+ region_ = builderForValue.build();
+ onChanged();
+ } else {
+ regionBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ region_ != null &&
+ region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
+ region_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial();
+ } else {
+ region_ = value;
+ }
+ onChanged();
+ } else {
+ regionBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public Builder clearRegion() {
+ if (regionBuilder_ == null) {
+ region_ = null;
+ onChanged();
+ } else {
+ regionBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getRegionFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
+ if (regionBuilder_ != null) {
+ return regionBuilder_.getMessageOrBuilder();
+ } else {
+ return region_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_;
+ }
+ }
+ /**
+ * required .hbase.pb.RegionSpecifier region = 1;
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
+ getRegionFieldBuilder() {
+ if (regionBuilder_ == null) {
+ regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
+ getRegion(),
+ getParentForChildren(),
+ isClean());
+ region_ = null;
+ }
+ return regionBuilder_;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitRequest)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parserrequired bool closed = 1;
+ */
+ boolean hasClosed();
+ /**
+ * required bool closed = 1;
+ */
+ boolean getClosed();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CloseRegionForSplitResponse}
+ */
+ public static final class CloseRegionForSplitResponse extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitResponse)
+ CloseRegionForSplitResponseOrBuilder {
+ // Use CloseRegionForSplitResponse.newBuilder() to construct.
+ private CloseRegionForSplitResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+ private CloseRegionForSplitResponse() {
+ closed_ = false;
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CloseRegionForSplitResponse(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ int mutable_bitField0_ = 0;
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ closed_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.Builder.class);
+ }
+
+ private int bitField0_;
+ public static final int CLOSED_FIELD_NUMBER = 1;
+ private boolean closed_;
+ /**
+ * required bool closed = 1;
+ */
+ public boolean hasClosed() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required bool closed = 1;
+ */
+ public boolean getClosed() {
+ return closed_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ if (!hasClosed()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, closed_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, closed_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) obj;
+
+ boolean result = true;
+ result = result && (hasClosed() == other.hasClosed());
+ if (hasClosed()) {
+ result = result && (getClosed()
+ == other.getClosed());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasClosed()) {
+ hash = (37 * hash) + CLOSED_FIELD_NUMBER;
+ hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
+ getClosed());
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CloseRegionForSplitResponse}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builderrequired bool closed = 1;
+ */
+ public boolean hasClosed() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required bool closed = 1;
+ */
+ public boolean getClosed() {
+ return closed_;
+ }
+ /**
+ * required bool closed = 1;
+ */
+ public Builder setClosed(boolean value) {
+ bitField0_ |= 0x00000001;
+ closed_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool closed = 1;
+ */
+ public Builder clearClosed() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ closed_ = false;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitResponse)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitResponse)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parserrpc CloseRegionForSplit(.hbase.pb.CloseRegionForSplitRequest) returns (.hbase.pb.CloseRegionForSplitResponse);
+ */
+ public abstract void closeRegionForSplit(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallbackrpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse);
*/
public abstract void flushRegion(
@@ -23765,6 +24851,14 @@ public final class AdminProtos {
}
@java.lang.Override
+ public void closeRegionForSplit(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallbackrpc CloseRegionForSplit(.hbase.pb.CloseRegionForSplitRequest) returns (.hbase.pb.CloseRegionForSplitResponse);
+ */
+ public abstract void closeRegionForSplit(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallbackrpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse);
*/
public abstract void flushRegion(
@@ -24201,56 +25309,61 @@ public final class AdminProtos {
done));
return;
case 6:
+ this.closeRegionForSplit(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.SPLIT_TABLE_REGION_PREPARE = 1;
+ */
+ SPLIT_TABLE_REGION_PREPARE(1),
+ /**
+ * SPLIT_TABLE_REGION_PRE_OPERATION = 2;
+ */
+ SPLIT_TABLE_REGION_PRE_OPERATION(2),
+ /**
+ * SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3;
+ */
+ SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE(3),
+ /**
+ * SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4;
+ */
+ SPLIT_TABLE_REGION_CLOSED_PARENT_REGION(4),
+ /**
+ * SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5;
+ */
+ SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS(5),
+ /**
+ * SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6;
+ */
+ SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR(6),
+ /**
+ * SPLIT_TABLE_REGION_UPDATE_META = 7;
+ */
+ SPLIT_TABLE_REGION_UPDATE_META(7),
+ /**
+ * SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 8;
+ */
+ SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR(8),
+ /**
+ * SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
+ */
+ SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS(9),
+ /**
+ * SPLIT_TABLE_REGION_POST_OPERATION = 10;
+ */
+ SPLIT_TABLE_REGION_POST_OPERATION(10),
+ ;
+
+ /**
+ * SPLIT_TABLE_REGION_PREPARE = 1;
+ */
+ public static final int SPLIT_TABLE_REGION_PREPARE_VALUE = 1;
+ /**
+ * SPLIT_TABLE_REGION_PRE_OPERATION = 2;
+ */
+ public static final int SPLIT_TABLE_REGION_PRE_OPERATION_VALUE = 2;
+ /**
+ * SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3;
+ */
+ public static final int SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE_VALUE = 3;
+ /**
+ * SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4;
+ */
+ public static final int SPLIT_TABLE_REGION_CLOSED_PARENT_REGION_VALUE = 4;
+ /**
+ * SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5;
+ */
+ public static final int SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS_VALUE = 5;
+ /**
+ * SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6;
+ */
+ public static final int SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR_VALUE = 6;
+ /**
+ * SPLIT_TABLE_REGION_UPDATE_META = 7;
+ */
+ public static final int SPLIT_TABLE_REGION_UPDATE_META_VALUE = 7;
+ /**
+ * SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 8;
+ */
+ public static final int SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR_VALUE = 8;
+ /**
+ * SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
+ */
+ public static final int SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS_VALUE = 9;
+ /**
+ * SPLIT_TABLE_REGION_POST_OPERATION = 10;
+ */
+ public static final int SPLIT_TABLE_REGION_POST_OPERATION_VALUE = 10;
+
+
+ public final int getNumber() {
+ return value;
+ }
+
+ /**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static SplitTableRegionState valueOf(int value) {
+ return forNumber(value);
+ }
+
+ public static SplitTableRegionState forNumber(int value) {
+ switch (value) {
+ case 1: return SPLIT_TABLE_REGION_PREPARE;
+ case 2: return SPLIT_TABLE_REGION_PRE_OPERATION;
+ case 3: return SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE;
+ case 4: return SPLIT_TABLE_REGION_CLOSED_PARENT_REGION;
+ case 5: return SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS;
+ case 6: return SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR;
+ case 7: return SPLIT_TABLE_REGION_UPDATE_META;
+ case 8: return SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR;
+ case 9: return SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS;
+ case 10: return SPLIT_TABLE_REGION_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMaprequired .hbase.pb.ServerName server_name = 1;
+ * required .hbase.pb.UserInformation user_info = 1;
*/
- boolean hasServerName();
+ boolean hasUserInfo();
/**
- * required .hbase.pb.ServerName server_name = 1;
+ * required .hbase.pb.UserInformation user_info = 1;
*/
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName();
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo();
/**
- * required .hbase.pb.ServerName server_name = 1;
+ * required .hbase.pb.UserInformation user_info = 1;
*/
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
/**
- * optional bool distributed_log_replay = 2;
+ * required .hbase.pb.TableName table_name = 2;
*/
- boolean hasDistributedLogReplay();
+ boolean hasTableName();
/**
- * optional bool distributed_log_replay = 2;
+ * required .hbase.pb.TableName table_name = 2;
*/
- boolean getDistributedLogReplay();
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
/**
- * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
*/
- java.util.Listrepeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
*/
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionsOnCrashedServer(int index);
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo();
/**
- * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
*/
- int getRegionsOnCrashedServerCount();
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder();
+
/**
- * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ * optional bytes split_row = 4;
*/
- java.util.List extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
- getRegionsOnCrashedServerOrBuilderList();
+ boolean hasSplitRow();
/**
- * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ * optional bytes split_row = 4;
*/
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsOnCrashedServerOrBuilder(
- int index);
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow();
/**
- * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
*/
java.util.Listrepeated .hbase.pb.RegionInfo regions_assigned = 4;
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
*/
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionsAssigned(int index);
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index);
/**
- * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
*/
- int getRegionsAssignedCount();
+ int getChildRegionInfoCount();
/**
- * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
*/
java.util.List extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
- getRegionsAssignedOrBuilderList();
+ getChildRegionInfoOrBuilderList();
/**
- * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
*/
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsAssignedOrBuilder(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder(
int index);
-
- /**
- * optional bool carrying_meta = 5;
- */
- boolean hasCarryingMeta();
- /**
- * optional bool carrying_meta = 5;
- */
- boolean getCarryingMeta();
-
- /**
- * optional bool should_split_wal = 6 [default = true];
- */
- boolean hasShouldSplitWal();
- /**
- * optional bool should_split_wal = 6 [default = true];
- */
- boolean getShouldSplitWal();
}
/**
- * Protobuf type {@code hbase.pb.ServerCrashStateData}
+ * Protobuf type {@code hbase.pb.SplitTableRegionStateData}
*/
- public static final class ServerCrashStateData extends
+ public static final class SplitTableRegionStateData extends
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
- // @@protoc_insertion_point(message_implements:hbase.pb.ServerCrashStateData)
- ServerCrashStateDataOrBuilder {
- // Use ServerCrashStateData.newBuilder() to construct.
- private ServerCrashStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionStateData)
+ SplitTableRegionStateDataOrBuilder {
+ // Use SplitTableRegionStateData.newBuilder() to construct.
+ private SplitTableRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder> builder) {
super(builder);
}
- private ServerCrashStateData() {
- distributedLogReplay_ = false;
- regionsOnCrashedServer_ = java.util.Collections.emptyList();
- regionsAssigned_ = java.util.Collections.emptyList();
- carryingMeta_ = false;
- shouldSplitWal_ = true;
+ private SplitTableRegionStateData() {
+ splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+ childRegionInfo_ = java.util.Collections.emptyList();
}
@java.lang.Override
@@ -21063,7 +21206,7 @@ public final class MasterProcedureProtos {
getUnknownFields() {
return this.unknownFields;
}
- private ServerCrashStateData(
+ private SplitTableRegionStateData(
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
@@ -21087,49 +21230,56 @@ public final class MasterProcedureProtos {
break;
}
case 10: {
- org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- subBuilder = serverName_.toBuilder();
+ subBuilder = userInfo_.toBuilder();
}
- serverName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
if (subBuilder != null) {
- subBuilder.mergeFrom(serverName_);
- serverName_ = subBuilder.buildPartial();
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
- case 16: {
+ case 18: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
bitField0_ |= 0x00000002;
- distributedLogReplay_ = input.readBool();
break;
}
case 26: {
- if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- regionsOnCrashedServer_ = new java.util.ArrayListrequired .hbase.pb.ServerName server_name = 1;
+ * required .hbase.pb.UserInformation user_info = 1;
*/
- public boolean hasServerName() {
+ public boolean hasUserInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * required .hbase.pb.ServerName server_name = 1;
+ * required .hbase.pb.UserInformation user_info = 1;
*/
- public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName() {
- return serverName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : serverName_;
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
}
/**
- * required .hbase.pb.ServerName server_name = 1;
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
+ }
+
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+ }
+
+ public static final int PARENT_REGION_INFO_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo parentRegionInfo_;
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public boolean hasParentRegionInfo() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo() {
+ return parentRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_;
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder() {
+ return parentRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_;
+ }
+
+ public static final int SPLIT_ROW_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_;
+ /**
+ * optional bytes split_row = 4;
+ */
+ public boolean hasSplitRow() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional bytes split_row = 4;
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() {
+ return splitRow_;
+ }
+
+ public static final int CHILD_REGION_INFO_FIELD_NUMBER = 5;
+ private java.util.Listrepeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public java.util.Listrepeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getChildRegionInfoOrBuilderList() {
+ return childRegionInfo_;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public int getChildRegionInfoCount() {
+ return childRegionInfo_.size();
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index) {
+ return childRegionInfo_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder(
+ int index) {
+ return childRegionInfo_.get(index);
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasParentRegionInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getParentRegionInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getChildRegionInfoCount(); i++) {
+ if (!getChildRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, getUserInfo());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, getTableName());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, getParentRegionInfo());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, splitRow_);
+ }
+ for (int i = 0; i < childRegionInfo_.size(); i++) {
+ output.writeMessage(5, childRegionInfo_.get(i));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, getUserInfo());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, getTableName());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, getParentRegionInfo());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, splitRow_);
+ }
+ for (int i = 0; i < childRegionInfo_.size(); i++) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(5, childRegionInfo_.get(i));
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasParentRegionInfo() == other.hasParentRegionInfo());
+ if (hasParentRegionInfo()) {
+ result = result && getParentRegionInfo()
+ .equals(other.getParentRegionInfo());
+ }
+ result = result && (hasSplitRow() == other.hasSplitRow());
+ if (hasSplitRow()) {
+ result = result && getSplitRow()
+ .equals(other.getSplitRow());
+ }
+ result = result && getChildRegionInfoList()
+ .equals(other.getChildRegionInfoList());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasParentRegionInfo()) {
+ hash = (37 * hash) + PARENT_REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getParentRegionInfo().hashCode();
+ }
+ if (hasSplitRow()) {
+ hash = (37 * hash) + SPLIT_ROW_FIELD_NUMBER;
+ hash = (53 * hash) + getSplitRow().hashCode();
+ }
+ if (getChildRegionInfoCount() > 0) {
+ hash = (37 * hash) + CHILD_REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getChildRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.SplitTableRegionStateData}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.SplitTableRegionStateData)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateDataOrBuilder {
+ private int bitField0_;
+
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation userInfo_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != null &&
+ userInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = null;
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
+ }
+ }
+ /**
+ * required .hbase.pb.UserInformation user_info = 1;
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ getUserInfo(),
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableName_ != null &&
+ tableName_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = null;
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 2;
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ getTableName(),
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo parentRegionInfo_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> parentRegionInfoBuilder_;
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public boolean hasParentRegionInfo() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo() {
+ if (parentRegionInfoBuilder_ == null) {
+ return parentRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_;
+ } else {
+ return parentRegionInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public Builder setParentRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (parentRegionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ parentRegionInfo_ = value;
+ onChanged();
+ } else {
+ parentRegionInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public Builder setParentRegionInfo(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (parentRegionInfoBuilder_ == null) {
+ parentRegionInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ parentRegionInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public Builder mergeParentRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (parentRegionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ parentRegionInfo_ != null &&
+ parentRegionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+ parentRegionInfo_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(parentRegionInfo_).mergeFrom(value).buildPartial();
+ } else {
+ parentRegionInfo_ = value;
+ }
+ onChanged();
+ } else {
+ parentRegionInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public Builder clearParentRegionInfo() {
+ if (parentRegionInfoBuilder_ == null) {
+ parentRegionInfo_ = null;
+ onChanged();
+ } else {
+ parentRegionInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getParentRegionInfoBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getParentRegionInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder() {
+ if (parentRegionInfoBuilder_ != null) {
+ return parentRegionInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return parentRegionInfo_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_;
+ }
+ }
+ /**
+ * required .hbase.pb.RegionInfo parent_region_info = 3;
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getParentRegionInfoFieldBuilder() {
+ if (parentRegionInfoBuilder_ == null) {
+ parentRegionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ getParentRegionInfo(),
+ getParentForChildren(),
+ isClean());
+ parentRegionInfo_ = null;
+ }
+ return parentRegionInfoBuilder_;
+ }
+
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+ /**
+ * optional bytes split_row = 4;
+ */
+ public boolean hasSplitRow() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional bytes split_row = 4;
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() {
+ return splitRow_;
+ }
+ /**
+ * optional bytes split_row = 4;
+ */
+ public Builder setSplitRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ splitRow_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional bytes split_row = 4;
+ */
+ public Builder clearSplitRow() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ splitRow_ = getDefaultInstance().getSplitRow();
+ onChanged();
+ return this;
+ }
+
+ private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> childRegionInfo_ =
+ java.util.Collections.emptyList();
+ private void ensureChildRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000010) == 0x00000010)) {
+ childRegionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>(childRegionInfo_);
+ bitField0_ |= 0x00000010;
+ }
+ }
+
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> childRegionInfoBuilder_;
+
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> getChildRegionInfoList() {
+ if (childRegionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(childRegionInfo_);
+ } else {
+ return childRegionInfoBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public int getChildRegionInfoCount() {
+ if (childRegionInfoBuilder_ == null) {
+ return childRegionInfo_.size();
+ } else {
+ return childRegionInfoBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index) {
+ if (childRegionInfoBuilder_ == null) {
+ return childRegionInfo_.get(index);
+ } else {
+ return childRegionInfoBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder setChildRegionInfo(
+ int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (childRegionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureChildRegionInfoIsMutable();
+ childRegionInfo_.set(index, value);
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder setChildRegionInfo(
+ int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (childRegionInfoBuilder_ == null) {
+ ensureChildRegionInfoIsMutable();
+ childRegionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder addChildRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (childRegionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureChildRegionInfoIsMutable();
+ childRegionInfo_.add(value);
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder addChildRegionInfo(
+ int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (childRegionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureChildRegionInfoIsMutable();
+ childRegionInfo_.add(index, value);
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder addChildRegionInfo(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (childRegionInfoBuilder_ == null) {
+ ensureChildRegionInfoIsMutable();
+ childRegionInfo_.add(builderForValue.build());
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder addChildRegionInfo(
+ int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (childRegionInfoBuilder_ == null) {
+ ensureChildRegionInfoIsMutable();
+ childRegionInfo_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder addAllChildRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (childRegionInfoBuilder_ == null) {
+ ensureChildRegionInfoIsMutable();
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+ values, childRegionInfo_);
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder clearChildRegionInfo() {
+ if (childRegionInfoBuilder_ == null) {
+ childRegionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public Builder removeChildRegionInfo(int index) {
+ if (childRegionInfoBuilder_ == null) {
+ ensureChildRegionInfoIsMutable();
+ childRegionInfo_.remove(index);
+ onChanged();
+ } else {
+ childRegionInfoBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getChildRegionInfoBuilder(
+ int index) {
+ return getChildRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder(
+ int index) {
+ if (childRegionInfoBuilder_ == null) {
+ return childRegionInfo_.get(index); } else {
+ return childRegionInfoBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getChildRegionInfoOrBuilderList() {
+ if (childRegionInfoBuilder_ != null) {
+ return childRegionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(childRegionInfo_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addChildRegionInfoBuilder() {
+ return getChildRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addChildRegionInfoBuilder(
+ int index) {
+ return getChildRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo child_region_info = 5;
+ */
+ public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getChildRegionInfoBuilderList() {
+ return getChildRegionInfoFieldBuilder().getBuilderList();
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.SplitTableRegionStateData)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.SplitTableRegionStateData)
+ }
+
+ public interface ServerCrashStateDataOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.ServerCrashStateData)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * required .hbase.pb.ServerName server_name = 1;
+ */
+ boolean hasServerName();
+ /**
+ * required .hbase.pb.ServerName server_name = 1;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName();
+ /**
+ * required .hbase.pb.ServerName server_name = 1;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+
+ /**
+ * optional bool distributed_log_replay = 2;
+ */
+ boolean hasDistributedLogReplay();
+ /**
+ * optional bool distributed_log_replay = 2;
+ */
+ boolean getDistributedLogReplay();
+
+ /**
+ * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ */
+ java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionsOnCrashedServerList();
+ /**
+ * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionsOnCrashedServer(int index);
+ /**
+ * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ */
+ int getRegionsOnCrashedServerCount();
+ /**
+ * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionsOnCrashedServerOrBuilderList();
+ /**
+ * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsOnCrashedServerOrBuilder(
+ int index);
+
+ /**
+ * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ */
+ java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionsAssignedList();
+ /**
+ * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionsAssigned(int index);
+ /**
+ * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ */
+ int getRegionsAssignedCount();
+ /**
+ * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionsAssignedOrBuilderList();
+ /**
+ * repeated .hbase.pb.RegionInfo regions_assigned = 4;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsAssignedOrBuilder(
+ int index);
+
+ /**
+ * optional bool carrying_meta = 5;
+ */
+ boolean hasCarryingMeta();
+ /**
+ * optional bool carrying_meta = 5;
+ */
+ boolean getCarryingMeta();
+
+ /**
+ * optional bool should_split_wal = 6 [default = true];
+ */
+ boolean hasShouldSplitWal();
+ /**
+ * optional bool should_split_wal = 6 [default = true];
+ */
+ boolean getShouldSplitWal();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.ServerCrashStateData}
+ */
+ public static final class ServerCrashStateData extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.ServerCrashStateData)
+ ServerCrashStateDataOrBuilder {
+ // Use ServerCrashStateData.newBuilder() to construct.
+ private ServerCrashStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private ServerCrashStateData() {
+ distributedLogReplay_ = false;
+ regionsOnCrashedServer_ = java.util.Collections.emptyList();
+ regionsAssigned_ = java.util.Collections.emptyList();
+ carryingMeta_ = false;
+ shouldSplitWal_ = true;
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private ServerCrashStateData(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ int mutable_bitField0_ = 0;
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = serverName_.toBuilder();
+ }
+ serverName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(serverName_);
+ serverName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ distributedLogReplay_ = input.readBool();
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionsOnCrashedServer_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ regionsOnCrashedServer_.add(
+ input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+
+ private int bitField0_;
+ public static final int SERVER_NAME_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName serverName_;
+ /**
+ * required .hbase.pb.ServerName server_name = 1;
+ */
+ public boolean hasServerName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName() {
+ return serverName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : serverName_;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 1;
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
return serverName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : serverName_;
@@ -22640,6 +24288,11 @@ public final class MasterProcedureProtos {
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hbase_pb_DispatchMergingRegionsStateData_fieldAccessorTable;
private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_SplitTableRegionStateData_descriptor;
+ private static final
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable;
+ private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ServerCrashStateData_descriptor;
private static final
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -22732,108 +24385,125 @@ public final class MasterProcedureProtos {
"e.pb.UserInformation\022\'\n\ntable_name\030\002 \002(\013" +
"2\023.hbase.pb.TableName\022)\n\013region_info\030\003 \003" +
"(\0132\024.hbase.pb.RegionInfo\022\020\n\010forcible\030\004 \001" +
- "(\010\"\201\002\n\024ServerCrashStateData\022)\n\013server_na",
- "me\030\001 \002(\0132\024.hbase.pb.ServerName\022\036\n\026distri" +
- "buted_log_replay\030\002 \001(\010\0227\n\031regions_on_cra" +
- "shed_server\030\003 \003(\0132\024.hbase.pb.RegionInfo\022" +
- ".\n\020regions_assigned\030\004 \003(\0132\024.hbase.pb.Reg" +
- "ionInfo\022\025\n\rcarrying_meta\030\005 \001(\010\022\036\n\020should" +
- "_split_wal\030\006 \001(\010:\004true*\330\001\n\020CreateTableSt" +
- "ate\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034C" +
- "REATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_" +
- "TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSI" +
- "GN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC",
- "_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020" +
- "\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY_TABLE_P" +
- "REPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002" +
- "\022(\n$MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR" +
- "\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_COLUMN" +
- "\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n" +
- "\033MODIFY_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY" +
- "_TABLE_REOPEN_ALL_REGIONS\020\007*\212\002\n\022Truncate" +
- "TableState\022 \n\034TRUNCATE_TABLE_PRE_OPERATI" +
- "ON\020\001\022#\n\037TRUNCATE_TABLE_REMOVE_FROM_META\020",
- "\002\022\"\n\036TRUNCATE_TABLE_CLEAR_FS_LAYOUT\020\003\022#\n" +
- "\037TRUNCATE_TABLE_CREATE_FS_LAYOUT\020\004\022\036\n\032TR" +
- "UNCATE_TABLE_ADD_TO_META\020\005\022!\n\035TRUNCATE_T" +
- "ABLE_ASSIGN_REGIONS\020\006\022!\n\035TRUNCATE_TABLE_" +
- "POST_OPERATION\020\007*\337\001\n\020DeleteTableState\022\036\n" +
- "\032DELETE_TABLE_PRE_OPERATION\020\001\022!\n\035DELETE_" +
- "TABLE_REMOVE_FROM_META\020\002\022 \n\034DELETE_TABLE" +
- "_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TABLE_UPDAT" +
- "E_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_UNASSIGN_" +
- "REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_OPERATION",
- "\020\006*\320\001\n\024CreateNamespaceState\022\034\n\030CREATE_NA" +
- "MESPACE_PREPARE\020\001\022%\n!CREATE_NAMESPACE_CR" +
- "EATE_DIRECTORY\020\002\022)\n%CREATE_NAMESPACE_INS" +
- "ERT_INTO_NS_TABLE\020\003\022\036\n\032CREATE_NAMESPACE_" +
- "UPDATE_ZK\020\004\022(\n$CREATE_NAMESPACE_SET_NAME" +
- "SPACE_QUOTA\020\005*z\n\024ModifyNamespaceState\022\034\n" +
- "\030MODIFY_NAMESPACE_PREPARE\020\001\022$\n MODIFY_NA" +
- "MESPACE_UPDATE_NS_TABLE\020\002\022\036\n\032MODIFY_NAME" +
- "SPACE_UPDATE_ZK\020\003*\332\001\n\024DeleteNamespaceSta" +
- "te\022\034\n\030DELETE_NAMESPACE_PREPARE\020\001\022)\n%DELE",
- "TE_NAMESPACE_DELETE_FROM_NS_TABLE\020\002\022#\n\037D" +
- "ELETE_NAMESPACE_REMOVE_FROM_ZK\020\003\022\'\n#DELE" +
- "TE_NAMESPACE_DELETE_DIRECTORIES\020\004\022+\n\'DEL" +
- "ETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA\020\005*\331" +
- "\001\n\024AddColumnFamilyState\022\035\n\031ADD_COLUMN_FA" +
- "MILY_PREPARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PRE_" +
- "OPERATION\020\002\022-\n)ADD_COLUMN_FAMILY_UPDATE_" +
- "TABLE_DESCRIPTOR\020\003\022$\n ADD_COLUMN_FAMILY_" +
- "POST_OPERATION\020\004\022(\n$ADD_COLUMN_FAMILY_RE" +
- "OPEN_ALL_REGIONS\020\005*\353\001\n\027ModifyColumnFamil",
- "yState\022 \n\034MODIFY_COLUMN_FAMILY_PREPARE\020\001" +
- "\022&\n\"MODIFY_COLUMN_FAMILY_PRE_OPERATION\020\002" +
- "\0220\n,MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DE" +
- "SCRIPTOR\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POST_" +
- "OPERATION\020\004\022+\n\'MODIFY_COLUMN_FAMILY_REOP" +
- "EN_ALL_REGIONS\020\005*\226\002\n\027DeleteColumnFamilyS" +
- "tate\022 \n\034DELETE_COLUMN_FAMILY_PREPARE\020\001\022&" +
- "\n\"DELETE_COLUMN_FAMILY_PRE_OPERATION\020\002\0220" +
- "\n,DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESC" +
- "RIPTOR\020\003\022)\n%DELETE_COLUMN_FAMILY_DELETE_",
- "FS_LAYOUT\020\004\022\'\n#DELETE_COLUMN_FAMILY_POST" +
- "_OPERATION\020\005\022+\n\'DELETE_COLUMN_FAMILY_REO" +
- "PEN_ALL_REGIONS\020\006*\350\001\n\020EnableTableState\022\030" +
- "\n\024ENABLE_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TABLE" +
- "_PRE_OPERATION\020\002\022)\n%ENABLE_TABLE_SET_ENA" +
- "BLING_TABLE_STATE\020\003\022$\n ENABLE_TABLE_MARK" +
- "_REGIONS_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_EN" +
- "ABLED_TABLE_STATE\020\005\022\037\n\033ENABLE_TABLE_POST" +
- "_OPERATION\020\006*\362\001\n\021DisableTableState\022\031\n\025DI" +
- "SABLE_TABLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE_P",
- "RE_OPERATION\020\002\022+\n\'DISABLE_TABLE_SET_DISA" +
- "BLING_TABLE_STATE\020\003\022&\n\"DISABLE_TABLE_MAR" +
- "K_REGIONS_OFFLINE\020\004\022*\n&DISABLE_TABLE_SET" +
- "_DISABLED_TABLE_STATE\020\005\022 \n\034DISABLE_TABLE" +
- "_POST_OPERATION\020\006*\346\001\n\022CloneSnapshotState" +
- "\022 \n\034CLONE_SNAPSHOT_PRE_OPERATION\020\001\022\"\n\036CL" +
- "ONE_SNAPSHOT_WRITE_FS_LAYOUT\020\002\022\036\n\032CLONE_" +
- "SNAPSHOT_ADD_TO_META\020\003\022!\n\035CLONE_SNAPSHOT" +
- "_ASSIGN_REGIONS\020\004\022$\n CLONE_SNAPSHOT_UPDA" +
- "TE_DESC_CACHE\020\005\022!\n\035CLONE_SNAPSHOT_POST_O",
- "PERATION\020\006*\260\001\n\024RestoreSnapshotState\022\"\n\036R" +
- "ESTORE_SNAPSHOT_PRE_OPERATION\020\001\022,\n(RESTO" +
- "RE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR\020\002\022$\n" +
- " RESTORE_SNAPSHOT_WRITE_FS_LAYOUT\020\003\022 \n\034R" +
- "ESTORE_SNAPSHOT_UPDATE_META\020\004*\376\001\n\033Dispat" +
- "chMergingRegionsState\022$\n DISPATCH_MERGIN" +
- "G_REGIONS_PREPARE\020\001\022*\n&DISPATCH_MERGING_" +
- "REGIONS_PRE_OPERATION\020\002\0223\n/DISPATCH_MERG" +
- "ING_REGIONS_MOVE_REGION_TO_SAME_RS\020\003\022+\n\'" +
- "DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS\020",
- "\004\022+\n\'DISPATCH_MERGING_REGIONS_POST_OPERA" +
- "TION\020\005*\234\002\n\020ServerCrashState\022\026\n\022SERVER_CR" +
- "ASH_START\020\001\022\035\n\031SERVER_CRASH_PROCESS_META" +
- "\020\002\022\034\n\030SERVER_CRASH_GET_REGIONS\020\003\022\036\n\032SERV" +
- "ER_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH" +
- "_SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH_PREPARE_LO" +
- "G_REPLAY\020\006\022\027\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033S" +
- "ERVER_CRASH_WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_C" +
- "RASH_FINISH\020dBR\n1org.apache.hadoop.hbase" +
- ".shaded.protobuf.generatedB\025MasterProced",
- "ureProtosH\001\210\001\001\240\001\001"
+ "(\010\"\350\001\n\031SplitTableRegionStateData\022,\n\tuser",
+ "_info\030\001 \002(\0132\031.hbase.pb.UserInformation\022\'" +
+ "\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableName\022" +
+ "0\n\022parent_region_info\030\003 \002(\0132\024.hbase.pb.R" +
+ "egionInfo\022\021\n\tsplit_row\030\004 \001(\014\022/\n\021child_re" +
+ "gion_info\030\005 \003(\0132\024.hbase.pb.RegionInfo\"\201\002" +
+ "\n\024ServerCrashStateData\022)\n\013server_name\030\001 " +
+ "\002(\0132\024.hbase.pb.ServerName\022\036\n\026distributed" +
+ "_log_replay\030\002 \001(\010\0227\n\031regions_on_crashed_" +
+ "server\030\003 \003(\0132\024.hbase.pb.RegionInfo\022.\n\020re" +
+ "gions_assigned\030\004 \003(\0132\024.hbase.pb.RegionIn",
+ "fo\022\025\n\rcarrying_meta\030\005 \001(\010\022\036\n\020should_spli" +
+ "t_wal\030\006 \001(\010:\004true*\330\001\n\020CreateTableState\022\036" +
+ "\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034CREATE" +
+ "_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TABLE" +
+ "_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSIGN_RE" +
+ "GIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC_CACH" +
+ "E\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020\006*\207\002\n" +
+ "\020ModifyTableState\022\030\n\024MODIFY_TABLE_PREPAR" +
+ "E\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002\022(\n$M" +
+ "ODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR\020\003\022&\n",
+ "\"MODIFY_TABLE_REMOVE_REPLICA_COLUMN\020\004\022!\n" +
+ "\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033MODI" +
+ "FY_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY_TABL" +
+ "E_REOPEN_ALL_REGIONS\020\007*\212\002\n\022TruncateTable" +
+ "State\022 \n\034TRUNCATE_TABLE_PRE_OPERATION\020\001\022" +
+ "#\n\037TRUNCATE_TABLE_REMOVE_FROM_META\020\002\022\"\n\036" +
+ "TRUNCATE_TABLE_CLEAR_FS_LAYOUT\020\003\022#\n\037TRUN" +
+ "CATE_TABLE_CREATE_FS_LAYOUT\020\004\022\036\n\032TRUNCAT" +
+ "E_TABLE_ADD_TO_META\020\005\022!\n\035TRUNCATE_TABLE_" +
+ "ASSIGN_REGIONS\020\006\022!\n\035TRUNCATE_TABLE_POST_",
+ "OPERATION\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELE" +
+ "TE_TABLE_PRE_OPERATION\020\001\022!\n\035DELETE_TABLE" +
+ "_REMOVE_FROM_META\020\002\022 \n\034DELETE_TABLE_CLEA" +
+ "R_FS_LAYOUT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DES" +
+ "C_CACHE\020\004\022!\n\035DELETE_TABLE_UNASSIGN_REGIO" +
+ "NS\020\005\022\037\n\033DELETE_TABLE_POST_OPERATION\020\006*\320\001" +
+ "\n\024CreateNamespaceState\022\034\n\030CREATE_NAMESPA" +
+ "CE_PREPARE\020\001\022%\n!CREATE_NAMESPACE_CREATE_" +
+ "DIRECTORY\020\002\022)\n%CREATE_NAMESPACE_INSERT_I" +
+ "NTO_NS_TABLE\020\003\022\036\n\032CREATE_NAMESPACE_UPDAT",
+ "E_ZK\020\004\022(\n$CREATE_NAMESPACE_SET_NAMESPACE" +
+ "_QUOTA\020\005*z\n\024ModifyNamespaceState\022\034\n\030MODI" +
+ "FY_NAMESPACE_PREPARE\020\001\022$\n MODIFY_NAMESPA" +
+ "CE_UPDATE_NS_TABLE\020\002\022\036\n\032MODIFY_NAMESPACE" +
+ "_UPDATE_ZK\020\003*\332\001\n\024DeleteNamespaceState\022\034\n" +
+ "\030DELETE_NAMESPACE_PREPARE\020\001\022)\n%DELETE_NA" +
+ "MESPACE_DELETE_FROM_NS_TABLE\020\002\022#\n\037DELETE" +
+ "_NAMESPACE_REMOVE_FROM_ZK\020\003\022\'\n#DELETE_NA" +
+ "MESPACE_DELETE_DIRECTORIES\020\004\022+\n\'DELETE_N" +
+ "AMESPACE_REMOVE_NAMESPACE_QUOTA\020\005*\331\001\n\024Ad",
+ "dColumnFamilyState\022\035\n\031ADD_COLUMN_FAMILY_" +
+ "PREPARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERA" +
+ "TION\020\002\022-\n)ADD_COLUMN_FAMILY_UPDATE_TABLE" +
+ "_DESCRIPTOR\020\003\022$\n ADD_COLUMN_FAMILY_POST_" +
+ "OPERATION\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_" +
+ "ALL_REGIONS\020\005*\353\001\n\027ModifyColumnFamilyStat" +
+ "e\022 \n\034MODIFY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"M" +
+ "ODIFY_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,M" +
+ "ODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIP" +
+ "TOR\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POST_OPERA",
+ "TION\020\004\022+\n\'MODIFY_COLUMN_FAMILY_REOPEN_AL" +
+ "L_REGIONS\020\005*\226\002\n\027DeleteColumnFamilyState\022" +
+ " \n\034DELETE_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DEL" +
+ "ETE_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,DEL" +
+ "ETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTO" +
+ "R\020\003\022)\n%DELETE_COLUMN_FAMILY_DELETE_FS_LA" +
+ "YOUT\020\004\022\'\n#DELETE_COLUMN_FAMILY_POST_OPER" +
+ "ATION\020\005\022+\n\'DELETE_COLUMN_FAMILY_REOPEN_A" +
+ "LL_REGIONS\020\006*\350\001\n\020EnableTableState\022\030\n\024ENA" +
+ "BLE_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_",
+ "OPERATION\020\002\022)\n%ENABLE_TABLE_SET_ENABLING" +
+ "_TABLE_STATE\020\003\022$\n ENABLE_TABLE_MARK_REGI" +
+ "ONS_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_ENABLED" +
+ "_TABLE_STATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPER" +
+ "ATION\020\006*\362\001\n\021DisableTableState\022\031\n\025DISABLE" +
+ "_TABLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OP" +
+ "ERATION\020\002\022+\n\'DISABLE_TABLE_SET_DISABLING" +
+ "_TABLE_STATE\020\003\022&\n\"DISABLE_TABLE_MARK_REG" +
+ "IONS_OFFLINE\020\004\022*\n&DISABLE_TABLE_SET_DISA" +
+ "BLED_TABLE_STATE\020\005\022 \n\034DISABLE_TABLE_POST",
+ "_OPERATION\020\006*\346\001\n\022CloneSnapshotState\022 \n\034C" +
+ "LONE_SNAPSHOT_PRE_OPERATION\020\001\022\"\n\036CLONE_S" +
+ "NAPSHOT_WRITE_FS_LAYOUT\020\002\022\036\n\032CLONE_SNAPS" +
+ "HOT_ADD_TO_META\020\003\022!\n\035CLONE_SNAPSHOT_ASSI" +
+ "GN_REGIONS\020\004\022$\n CLONE_SNAPSHOT_UPDATE_DE" +
+ "SC_CACHE\020\005\022!\n\035CLONE_SNAPSHOT_POST_OPERAT" +
+ "ION\020\006*\260\001\n\024RestoreSnapshotState\022\"\n\036RESTOR" +
+ "E_SNAPSHOT_PRE_OPERATION\020\001\022,\n(RESTORE_SN" +
+ "APSHOT_UPDATE_TABLE_DESCRIPTOR\020\002\022$\n REST" +
+ "ORE_SNAPSHOT_WRITE_FS_LAYOUT\020\003\022 \n\034RESTOR",
+ "E_SNAPSHOT_UPDATE_META\020\004*\376\001\n\033DispatchMer" +
+ "gingRegionsState\022$\n DISPATCH_MERGING_REG" +
+ "IONS_PREPARE\020\001\022*\n&DISPATCH_MERGING_REGIO" +
+ "NS_PRE_OPERATION\020\002\0223\n/DISPATCH_MERGING_R" +
+ "EGIONS_MOVE_REGION_TO_SAME_RS\020\003\022+\n\'DISPA" +
+ "TCH_MERGING_REGIONS_DO_MERGE_IN_RS\020\004\022+\n\'" +
+ "DISPATCH_MERGING_REGIONS_POST_OPERATION\020" +
+ "\005*\305\003\n\025SplitTableRegionState\022\036\n\032SPLIT_TAB" +
+ "LE_REGION_PREPARE\020\001\022$\n SPLIT_TABLE_REGIO" +
+ "N_PRE_OPERATION\020\002\0220\n,SPLIT_TABLE_REGION_",
+ "SET_SPLITTING_TABLE_STATE\020\003\022+\n\'SPLIT_TAB" +
+ "LE_REGION_CLOSED_PARENT_REGION\020\004\022.\n*SPLI" +
+ "T_TABLE_REGION_CREATE_DAUGHTER_REGIONS\020\005" +
+ "\0220\n,SPLIT_TABLE_REGION_PRE_OPERATION_BEF" +
+ "ORE_PONR\020\006\022\"\n\036SPLIT_TABLE_REGION_UPDATE_" +
+ "META\020\007\022/\n+SPLIT_TABLE_REGION_PRE_OPERATI" +
+ "ON_AFTER_PONR\020\010\022)\n%SPLIT_TABLE_REGION_OP" +
+ "EN_CHILD_REGIONS\020\t\022%\n!SPLIT_TABLE_REGION" +
+ "_POST_OPERATION\020\n*\234\002\n\020ServerCrashState\022\026" +
+ "\n\022SERVER_CRASH_START\020\001\022\035\n\031SERVER_CRASH_P",
+ "ROCESS_META\020\002\022\034\n\030SERVER_CRASH_GET_REGION" +
+ "S\020\003\022\036\n\032SERVER_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027S" +
+ "ERVER_CRASH_SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH" +
+ "_PREPARE_LOG_REPLAY\020\006\022\027\n\023SERVER_CRASH_AS" +
+ "SIGN\020\010\022\037\n\033SERVER_CRASH_WAIT_ON_ASSIGN\020\t\022" +
+ "\027\n\023SERVER_CRASH_FINISH\020dBR\n1org.apache.h" +
+ "adoop.hbase.shaded.protobuf.generatedB\025M" +
+ "asterProcedureProtosH\001\210\001\001\240\001\001"
};
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
@@ -22945,8 +24615,14 @@ public final class MasterProcedureProtos {
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_DispatchMergingRegionsStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", "Forcible", });
- internal_static_hbase_pb_ServerCrashStateData_descriptor =
+ internal_static_hbase_pb_SplitTableRegionStateData_descriptor =
getDescriptor().getMessageTypes().get(16);
+ internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable = new
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_hbase_pb_SplitTableRegionStateData_descriptor,
+ new java.lang.String[] { "UserInfo", "TableName", "ParentRegionInfo", "SplitRow", "ChildRegionInfo", });
+ internal_static_hbase_pb_ServerCrashStateData_descriptor =
+ getDescriptor().getMessageTypes().get(17);
internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable = new
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_ServerCrashStateData_descriptor,
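
Note (illustrative, not part of the patch): the hunks above register the new SplitTableRegionStateData message and SplitTableRegionState enum in the MasterProcedureProtos file descriptor. A minimal sketch of how caller code might build and round-trip the new state message through the generated API follows; the class name SplitStateDataExample and the table, region, and user values are hypothetical placeholders, not code from this patch.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation;

public class SplitStateDataExample {
  public static void main(String[] args) throws Exception {
    // Placeholder table and parent region; the values are made up for the sketch.
    HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();
    HBaseProtos.RegionInfo parent = HBaseProtos.RegionInfo.newBuilder()
        .setRegionId(1L)
        .setTableName(table)
        .build();
    // user_info, table_name and parent_region_info are required fields; split_row is
    // optional and child_region_info is repeated, per the descriptor added above.
    SplitTableRegionStateData state = SplitTableRegionStateData.newBuilder()
        .setUserInfo(UserInformation.newBuilder().setEffectiveUser("hbase").build())
        .setTableName(table)
        .setParentRegionInfo(parent)
        .setSplitRow(ByteString.copyFromUtf8("row-0500"))
        .build();
    // Serialize and parse back, as procedure state (de)serialization would.
    byte[] bytes = state.toByteArray();
    SplitTableRegionStateData restored = SplitTableRegionStateData.parseFrom(bytes);
    System.out.println(restored.getParentRegionInfo().getRegionId()); // prints 1
  }
}
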
diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index b8712b7..8f368e9 100644
--- hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -8822,6 +8822,1348 @@ public final class RegionServerStatusProtos {
}
+ public interface SplitTableRegionRequestOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionRequest)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ boolean hasRegionInfo();
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
+
+ /**
+ * required bytes split_row = 2;
+ */
+ boolean hasSplitRow();
+ /**
+ * required bytes split_row = 2;
+ */
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow();
+
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ boolean hasNonceGroup();
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ long getNonceGroup();
+
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ boolean hasNonce();
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ long getNonce();
+ }
+ /**
+ *
+ **
+ * Splits the specified region.
+ *
+ *
+ * Protobuf type {@code hbase.pb.SplitTableRegionRequest}
+ */
+ public static final class SplitTableRegionRequest extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionRequest)
+ SplitTableRegionRequestOrBuilder {
+ // Use SplitTableRegionRequest.newBuilder() to construct.
+ private SplitTableRegionRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private SplitTableRegionRequest() {
+ splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+ nonceGroup_ = 0L;
+ nonce_ = 0L;
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SplitTableRegionRequest(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ int mutable_bitField0_ = 0;
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = regionInfo_.toBuilder();
+ }
+ regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(regionInfo_);
+ regionInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ splitRow_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ nonceGroup_ = input.readUInt64();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ nonce_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionRequest_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.Builder.class);
+ }
+
+ private int bitField0_;
+ public static final int REGION_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public boolean hasRegionInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+ return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+ return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
+ }
+
+ public static final int SPLIT_ROW_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_;
+ /**
+ * required bytes split_row = 2;
+ */
+ public boolean hasSplitRow() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bytes split_row = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() {
+ return splitRow_;
+ }
+
+ public static final int NONCE_GROUP_FIELD_NUMBER = 3;
+ private long nonceGroup_;
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+
+ public static final int NONCE_FIELD_NUMBER = 4;
+ private long nonce_;
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ if (!hasRegionInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSplitRow()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegionInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, getRegionInfo());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, splitRow_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt64(4, nonce_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, getRegionInfo());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, splitRow_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(4, nonce_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest) obj;
+
+ boolean result = true;
+ result = result && (hasRegionInfo() == other.hasRegionInfo());
+ if (hasRegionInfo()) {
+ result = result && getRegionInfo()
+ .equals(other.getRegionInfo());
+ }
+ result = result && (hasSplitRow() == other.hasSplitRow());
+ if (hasSplitRow()) {
+ result = result && getSplitRow()
+ .equals(other.getSplitRow());
+ }
+ result = result && (hasNonceGroup() == other.hasNonceGroup());
+ if (hasNonceGroup()) {
+ result = result && (getNonceGroup()
+ == other.getNonceGroup());
+ }
+ result = result && (hasNonce() == other.hasNonce());
+ if (hasNonce()) {
+ result = result && (getNonce()
+ == other.getNonce());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasRegionInfo()) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfo().hashCode();
+ }
+ if (hasSplitRow()) {
+ hash = (37 * hash) + SPLIT_ROW_FIELD_NUMBER;
+ hash = (53 * hash) + getSplitRow().hashCode();
+ }
+ if (hasNonceGroup()) {
+ hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER;
+ hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+ getNonceGroup());
+ }
+ if (hasNonce()) {
+ hash = (37 * hash) + NONCE_FIELD_NUMBER;
+ hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+ getNonce());
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * <pre>
+ **
+ * Splits the specified region.
+ * </pre>
+ *
+ * Protobuf type {@code hbase.pb.SplitTableRegionRequest}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.SplitTableRegionRequest)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequestOrBuilder {
+ /**
+ * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
+ */
+ public boolean hasRegionInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
+ } else {
+ return regionInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public Builder setRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ regionInfo_ = value;
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public Builder setRegionInfo(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public Builder mergeRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ regionInfo_ != null &&
+ regionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+ regionInfo_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
+ } else {
+ regionInfo_ = value;
+ }
+ onChanged();
+ } else {
+ regionInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = null;
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getRegionInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return regionInfo_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
+ }
+ }
+ /**
+ * required .hbase.pb.RegionInfo region_info = 1;
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ getRegionInfo(),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+ /**
+ * required bytes split_row = 2;
+ */
+ public boolean hasSplitRow() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bytes split_row = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() {
+ return splitRow_;
+ }
+ /**
+ * required bytes split_row = 2;
+ */
+ public Builder setSplitRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ splitRow_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bytes split_row = 2;
+ */
+ public Builder clearSplitRow() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ splitRow_ = getDefaultInstance().getSplitRow();
+ onChanged();
+ return this;
+ }
+
+ private long nonceGroup_ ;
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ public Builder setNonceGroup(long value) {
+ bitField0_ |= 0x00000004;
+ nonceGroup_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce_group = 3 [default = 0];
+ */
+ public Builder clearNonceGroup() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ nonceGroup_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ private long nonce_ ;
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ public Builder setNonce(long value) {
+ bitField0_ |= 0x00000008;
+ nonce_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce = 4 [default = 0];
+ */
+ public Builder clearNonce() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ nonce_ = 0L;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.SplitTableRegionRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.SplitTableRegionRequest)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SplitTableRegionRequest>
+
+ public interface SplitTableRegionResponseOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionResponse)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ boolean hasProcId();
+ /**
+ * optional uint64 proc_id = 1;
+ */
+ long getProcId();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.SplitTableRegionResponse}
+ */
+ public static final class SplitTableRegionResponse extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionResponse)
+ SplitTableRegionResponseOrBuilder {
+ // Use SplitTableRegionResponse.newBuilder() to construct.
+ private SplitTableRegionResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private SplitTableRegionResponse() {
+ procId_ = 0L;
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SplitTableRegionResponse(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ int mutable_bitField0_ = 0;
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ procId_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionResponse_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.Builder.class);
+ }
+
+ private int bitField0_;
+ public static final int PROC_ID_FIELD_NUMBER = 1;
+ private long procId_;
+ /**
+ * optional uint64 proc_id = 1;
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional uint64 proc_id = 1;
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, procId_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, procId_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse) obj;
+
+ boolean result = true;
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+ getProcId());
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.SplitTableRegionResponse}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.SplitTableRegionResponse)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponseOrBuilder {
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional uint64 proc_id = 1;
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * optional uint64 proc_id = 1;
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000001;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 proc_id = 1;
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.SplitTableRegionResponse)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.SplitTableRegionResponse)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SplitTableRegionResponse>
+ /**
+ * <pre>
+ **
+ * Split region
+ * </pre>
+ *
+ * <code>rpc SplitRegion(.hbase.pb.SplitTableRegionRequest) returns (.hbase.pb.SplitTableRegionResponse);</code>
+ */
+ public abstract void splitRegion(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse> done);
+
+ /**
+ * <pre>
+ **
+ * Get procedure result
+ * </pre>
+ *
+ * <code>rpc getProcedureResult(.hbase.pb.GetProcedureResultRequest) returns (.hbase.pb.GetProcedureResultResponse);</code>
+ */
+ public abstract void getProcedureResult(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse> done);
+
+ /**
+ * <pre>
+ **
+ * Split region
+ * </pre>
+ *
+ * <code>rpc SplitRegion(.hbase.pb.SplitTableRegionRequest) returns (.hbase.pb.SplitTableRegionResponse);</code>
+ */
+ public abstract void splitRegion(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse> done);
+
+ /**
+ * <pre>
+ **
+ * Get procedure result
+ * </pre>
+ *
+ * <code>rpc getProcedureResult(.hbase.pb.GetProcedureResultRequest) returns (.hbase.pb.GetProcedureResultResponse);</code>
+ */
+ public abstract void getProcedureResult(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse> done);
* This method is synchronous.
@@ -274,6 +286,9 @@ service AdminService {
rpc CloseRegion(CloseRegionRequest)
returns(CloseRegionResponse);
+ rpc CloseRegionForSplit(CloseRegionForSplitRequest)
+ returns(CloseRegionForSplitResponse);
+
rpc FlushRegion(FlushRegionRequest)
returns(FlushRegionResponse);
diff --git hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 0e0b385..f34a7ff 100644
--- hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -277,6 +277,27 @@ message DispatchMergingRegionsStateData {
optional bool forcible = 4;
}
+enum SplitTableRegionState {
+ SPLIT_TABLE_REGION_PREPARE = 1;
+ SPLIT_TABLE_REGION_PRE_OPERATION = 2;
+ SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3;
+ SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4;
+ SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5;
+ SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6;
+ SPLIT_TABLE_REGION_UPDATE_META = 7;
+ SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 8;
+ SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
+ SPLIT_TABLE_REGION_POST_OPERATION = 10;
+}
+
+message SplitTableRegionStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required RegionInfo parent_region_info = 3;
+ optional bytes split_row = 4;
+ repeated RegionInfo child_region_info = 5;
+}
+
message ServerCrashStateData {
required ServerName server_name = 1;
optional bool distributed_log_replay = 2;
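Note: for orientation only, a small sketch that walks the new split states in the order the procedure would execute them. It assumes the enum above is generated into MasterProcedureProtos alongside the other MasterProcedure.proto messages; the class and helper names below are standard protobuf codegen conventions and are not part of this patch.

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;

// Prints the split-region states in ascending enum number, i.e. the order
// laid out in the SplitTableRegionState definition above.
public class SplitStateWalk {
  public static void main(String[] args) {
    for (SplitTableRegionState state : SplitTableRegionState.values()) {
      // The *_BEFORE_PONR / *_AFTER_PONR names suggest the META update is the
      // point of no return for rollback; that reading is an inference, not
      // something stated in the .proto file.
      System.out.println(state.getNumber() + " -> " + state);
    }
  }
}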
diff --git hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 60cf77a..1c373ee 100644
--- hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -26,6 +26,7 @@ option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
+import "Master.proto";
import "ClusterStatus.proto";
message RegionServerStartupRequest {
@@ -126,6 +127,20 @@ message ReportRegionStateTransitionResponse {
optional string error_message = 1;
}
+/**
+ * Splits the specified region.
+ */
+message SplitTableRegionRequest {
+ required RegionInfo region_info = 1;
+ required bytes split_row = 2;
+ optional uint64 nonce_group = 3 [default = 0];
+ optional uint64 nonce = 4 [default = 0];
+}
+
+message SplitTableRegionResponse {
+ optional uint64 proc_id = 1;
+}
+
service RegionServerStatusService {
/** Called when a region server first starts. */
rpc RegionServerStartup(RegionServerStartupRequest)
@@ -155,4 +170,16 @@ service RegionServerStatusService {
*/
rpc ReportRegionStateTransition(ReportRegionStateTransitionRequest)
returns(ReportRegionStateTransitionResponse);
+
+ /**
+ * Split region
+ */
+ rpc SplitRegion(SplitTableRegionRequest)
+ returns(SplitTableRegionResponse);
+
+ /**
+ * Get procedure result
+ */
+ rpc getProcedureResult(GetProcedureResultRequest)
+ returns(GetProcedureResultResponse);
}
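For reference, a minimal sketch of how a caller might assemble the new SplitTableRegionRequest from the generated message above and read the procedure id out of the response. The RegionInfo argument and the way the request is dispatched to the master are assumptions of this example, not part of the patch.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse;

public final class SplitRequestSketch {
  // Builds the request; region_info and split_row are the two required fields.
  public static SplitTableRegionRequest buildSplitRequest(RegionInfo parent, byte[] splitRow,
      long nonceGroup, long nonce) {
    return SplitTableRegionRequest.newBuilder()
        .setRegionInfo(parent)                      // required: parent region to split
        .setSplitRow(ByteString.copyFrom(splitRow)) // required: row key to split at
        .setNonceGroup(nonceGroup)                  // optional, default 0
        .setNonce(nonce)                            // optional, default 0
        .build();
  }

  // The response only carries a procedure id; the caller is expected to poll
  // getProcedureResult with this id to find out whether the split finished.
  public static long procIdOf(SplitTableRegionResponse response) {
    return response.hasProcId() ? response.getProcId() : -1L;
  }
}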
diff --git hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index ed07c5c..70167bb 100644
--- hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.MasterSwitchType;
+import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@@ -80,7 +81,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdmi
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
-
@InterfaceAudience.Private
public class RSGroupAdminEndpoint extends RSGroupAdminService
implements CoprocessorService, Coprocessor, MasterObserver {
@@ -1098,4 +1098,42 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
public void postListProcedures(ObserverContext