diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 9300372..85af727 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -985,6 +985,8 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param regionName region to split
    * @throws IOException if a remote or network exception occurs
+   * @deprecated Since 2.0. Will be removed in 3.0. Use
+   *     {@link #splitRegionAsync(byte[], byte[])} instead.
    */
   void splitRegion(final byte[] regionName) throws IOException;
 
@@ -1004,11 +1006,22 @@
    * @param regionName region to split
    * @param splitPoint the explicit position to split on
    * @throws IOException if a remote or network exception occurs
+   * @deprecated Since 2.0. Will be removed in 3.0. Use
+   *     {@link #splitRegionAsync(byte[], byte[])} instead.
    */
   void splitRegion(final byte[] regionName, final byte[] splitPoint) throws IOException;
 
   /**
+   * Split an individual region. Asynchronous operation.
+   * @param regionName region to split
+   * @param splitPoint the explicit position to split on
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint)
+    throws IOException;
+
+  /**
    * Modify an existing table, more IRB friendly version.
    *
    * @param tableName name of table.
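Reviewer note (not part of the patch): a minimal sketch of how a caller migrates from the deprecated blocking splitRegion to the new splitRegionAsync. The table name "t1", the split key, and the timeout are invented for illustration.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRegionAsyncExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Name of the first region of table "t1" (assumed to exist).
      byte[] regionName =
          admin.getTableRegions(TableName.valueOf("t1")).get(0).getRegionName();
      // Deprecated, fire-and-forget against the region server:
      //   admin.splitRegion(regionName, Bytes.toBytes("row-500"));
      // New API: returns a Future backed by the master-side split procedure;
      // get() blocks only if the caller wants synchronous semantics.
      Future<Void> f = admin.splitRegionAsync(regionName, Bytes.toBytes("row-500"));
      f.get(2, TimeUnit.MINUTES);
    }
  }
}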
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 1c6ea03..a10992b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -177,6 +177,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormali
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
@@ -1637,6 +1639,67 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
+  public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint)
+      throws IOException {
+    byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ?
+        regionName : HRegionInfo.encodeRegionName(regionName).getBytes();
+    Pair<HRegionInfo, ServerName> pair = getRegion(regionName);
+    if (pair != null) {
+      if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+        throw new IllegalArgumentException("Can't invoke split on non-default regions directly");
+      }
+    } else {
+      throw new UnknownRegionException(
+        "Can't invoke split on unknown region " +
+        Bytes.toStringBinary(encodedNameofRegionToSplit));
+    }
+
+    HRegionInfo hri = pair.getFirst();
+    return splitRegionAsync(hri, splitPoint);
+  }
+
+  public Future<Void> splitRegionAsync(HRegionInfo hri, byte[] splitPoint) throws IOException {
+    TableName tableName = hri.getTable();
+    if (hri.getStartKey() != null && splitPoint != null &&
+        Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
+      throw new IOException("should not give a splitkey which equals to startkey!");
+    }
+
+    SplitTableRegionResponse response = executeCallable(
+      new MasterCallable<SplitTableRegionResponse>(getConnection(), getRpcControllerFactory()) {
+        @Override
+        protected SplitTableRegionResponse rpcCall() throws Exception {
+          setPriority(tableName);
+          SplitTableRegionRequest request = RequestConverter
+              .buildSplitTableRegionRequest(hri, splitPoint, ng.getNonceGroup(), ng.newNonce());
+          return master.splitRegion(getRpcController(), request);
+        }
+      });
+    return new SplitTableRegionFuture(this, tableName, response);
+  }
+
+  private static class SplitTableRegionFuture extends TableFuture<Void> {
+    public SplitTableRegionFuture(final HBaseAdmin admin,
+        final TableName tableName,
+        final SplitTableRegionResponse response) {
+      super(admin, tableName,
+          (response != null && response.hasProcId()) ? response.getProcId() : null);
+    }
+
+    public SplitTableRegionFuture(
+        final HBaseAdmin admin,
+        final TableName tableName,
+        final Long procId) {
+      super(admin, tableName, procId);
+    }
+
+    @Override
+    public String getOperationType() {
+      return "SPLIT_REGIONS";
+    }
+  }
+
+  @Override
   public void split(final TableName tableName) throws IOException {
     split(tableName, null);
   }
@@ -1646,9 +1709,6 @@ public class HBaseAdmin implements Admin {
     splitRegion(regionName, null);
   }
 
-  /**
-   * {@inheritDoc}
-   */
   @Override
   public void split(final TableName tableName, final byte [] splitPoint) throws IOException {
     ZooKeeperWatcher zookeeper = null;
@@ -1671,8 +1731,8 @@ public class HBaseAdmin implements Admin {
         // if a split point given, only split that particular region
         if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
             (splitPoint != null && !r.containsRow(splitPoint))) continue;
-        // call out to region server to do split now
-        split(pair.getSecond(), pair.getFirst(), splitPoint);
+        // call out to master to do split now
+        splitRegionAsync(pair.getFirst(), splitPoint);
       }
     } finally {
       if (zookeeper != null) {
@@ -1695,23 +1755,7 @@ public class HBaseAdmin implements Admin {
     if (regionServerPair.getSecond() == null) {
       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
     }
-    split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
-  }
-
-  @VisibleForTesting
-  public void split(final ServerName sn, final HRegionInfo hri,
-      byte[] splitPoint) throws IOException {
-    if (hri.getStartKey() != null && splitPoint != null &&
-        Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
-      throw new IOException("should not give a splitkey which equals to startkey!");
-    }
-    // TODO: There is no timeout on this controller. Set one!
-    HBaseRpcController controller = rpcControllerFactory.newController();
-    controller.setPriority(hri.getTable());
-
-    // TODO: this does not do retries, it should. Set priority and timeout in controller
-    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
-    ProtobufUtil.split(controller, admin, hri, splitPoint);
+    splitRegionAsync(regionServerPair.getFirst(), splitPoint);
   }
 
   @Override
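Reviewer note (not part of the patch): the RequestConverter changes below add the builder these calls rely on. As a hedged sketch, this is what driving buildSplitTableRegionRequest directly looks like; the region boundaries, split key, and nonce values are invented for the example, whereas real callers obtain them from getRegion(...) and the connection's NonceGenerator as in the code above.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.util.Bytes;

public class BuildSplitRequestSketch {
  public static void main(String[] args) throws DeserializationException {
    // Invented region of table "t1" spanning [row-000, row-999).
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("t1"),
        Bytes.toBytes("row-000"), Bytes.toBytes("row-999"));
    // splitRow may be null, in which case the split point is chosen
    // server-side; nonceGroup/nonce let the master dedupe retried requests.
    SplitTableRegionRequest req = RequestConverter.buildSplitTableRegionRequest(
        hri, Bytes.toBytes("row-500"), /* nonceGroup */ 1L, /* nonce */ 1L);
    System.out.println(req);
  }
}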
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 67f7d0a..fbbf299 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
@@ -117,6 +118,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleaner
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
@@ -791,6 +794,14 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  public static GetBestSplitPointRequest
+      buildGetBestSplitPointRequest(final byte[] regionName) {
+    GetBestSplitPointRequest.Builder builder = GetBestSplitPointRequest.newBuilder();
+    RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
+    builder.setRegion(region);
+    return builder.build();
+  }
+
   /**
    * Create a protocol buffer GetRegionLoadRequest for all regions/regions of a table.
* @@ -1119,6 +1130,21 @@ public final class RequestConverter { return builder.build(); } + public static SplitTableRegionRequest buildSplitTableRegionRequest( + final HRegionInfo regionInfo, + final byte[] splitRow, + final long nonceGroup, + final long nonce) throws DeserializationException { + SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder(); + builder.setRegionInfo(HRegionInfo.convert(regionInfo)); + if (splitRow != null) { + builder.setSplitRow(UnsafeByteOperations.unsafeWrap(splitRow)); + } + builder.setNonceGroup(nonceGroup); + builder.setNonce(nonce); + return builder.build(); + } + /** * Create a protocol buffer AssignRegionRequest * diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java index 812cf3b..33f69ad 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java @@ -1819,6 +1819,1067 @@ public final class AdminProtos { } + public interface GetBestSplitPointRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.GetBestSplitPointRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + boolean hasRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetBestSplitPointRequest} + */ + public static final class GetBestSplitPointRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.GetBestSplitPointRequest) + GetBestSplitPointRequestOrBuilder { + // Use GetBestSplitPointRequest.newBuilder() to construct. 
+ private GetBestSplitPointRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetBestSplitPointRequest() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetBestSplitPointRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.Builder.class); + } + + private int bitField0_; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getRegion()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getRegion()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom(byte[] data) + throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetBestSplitPointRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.GetBestSplitPointRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = null; + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + 
result.region_ = regionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + return false; + } + if (!getRegion().isInitialized()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier region_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; 
+ */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != null && + region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = null; + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + getRegion(), + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetBestSplitPointRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetBestSplitPointRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public GetBestSplitPointRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new GetBestSplitPointRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface GetBestSplitPointResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.GetBestSplitPointResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * optional bytes splitPoint = 1; + */ + boolean hasSplitPoint(); + /** + * optional bytes splitPoint = 1; + */ + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitPoint(); + } + /** + * Protobuf type {@code hbase.pb.GetBestSplitPointResponse} + */ + public static final class GetBestSplitPointResponse extends + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.GetBestSplitPointResponse) + GetBestSplitPointResponseOrBuilder { + // Use GetBestSplitPointResponse.newBuilder() to construct. + private GetBestSplitPointResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetBestSplitPointResponse() { + splitPoint_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetBestSplitPointResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + splitPoint_ = input.readBytes(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.Builder.class); + } + + private int bitField0_; + public static final int SPLITPOINT_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitPoint_; + /** + * optional bytes splitPoint = 1; + */ + public boolean hasSplitPoint() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes splitPoint = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitPoint() { + return splitPoint_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, splitPoint_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBytesSize(1, splitPoint_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse) obj; + + boolean result = true; + result = result && (hasSplitPoint() == other.hasSplitPoint()); + if (hasSplitPoint()) { + result = result && getSplitPoint() + .equals(other.getSplitPoint()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSplitPoint()) { + hash = (37 * hash) + SPLITPOINT_FIELD_NUMBER; + hash = (53 * hash) + getSplitPoint().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetBestSplitPointResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.GetBestSplitPointResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + splitPoint_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetBestSplitPointResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.splitPoint_ = splitPoint_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return 
result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.getDefaultInstance()) return this; + if (other.hasSplitPoint()) { + setSplitPoint(other.getSplitPoint()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitPoint_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes splitPoint = 1; + */ + public boolean hasSplitPoint() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes splitPoint = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitPoint() { + return splitPoint_; + } + /** + * optional bytes splitPoint = 1; + */ + public Builder setSplitPoint(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + splitPoint_ = value; + onChanged(); + return this; + } + /** + * optional bytes splitPoint = 1; + */ + public Builder clearSplitPoint() { + bitField0_ = 
(bitField0_ & ~0x00000001); + splitPoint_ = getDefaultInstance().getSplitPoint(); + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetBestSplitPointResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetBestSplitPointResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public GetBestSplitPointResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new GetBestSplitPointResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + public interface GetStoreFileRequestOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.GetStoreFileRequest) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -28357,6 +29418,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** + * rpc GetBestSplitPoint(.hbase.pb.GetBestSplitPointRequest) returns (.hbase.pb.GetBestSplitPointResponse); + */ + public abstract void getBestSplitPoint( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** * rpc GetStoreFile(.hbase.pb.GetStoreFileRequest) returns (.hbase.pb.GetStoreFileResponse); */ public abstract void getStoreFile( @@ -28534,6 +29603,14 @@ public final class AdminProtos { } @java.lang.Override + public void getBestSplitPoint( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.getBestSplitPoint(controller, request, done); + } + + @java.lang.Override public void getStoreFile( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest request, @@ -28718,44 +29795,46 @@ public final class AdminProtos { case 0: return impl.getRegionInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest)request); case 1: - return impl.getStoreFile(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest)request); + return impl.getBestSplitPoint(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest)request); case 2: - return impl.getOnlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest)request); + return impl.getStoreFile(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest)request); case 3: - return impl.openRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest)request); + return impl.getOnlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest)request); case 4: - return impl.warmupRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest)request); + return impl.openRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest)request); case 5: - return impl.closeRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest)request); + return impl.warmupRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest)request); case 6: - return impl.flushRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest)request); + return impl.closeRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest)request); case 7: - return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest)request); + return impl.flushRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest)request); case 8: - return impl.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request); + return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest)request); case 9: - return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request); case 10: - return impl.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 11: - return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request); + return impl.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 12: - return impl.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request); + return impl.rollWALWriter(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request); case 13: - return impl.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request); + return impl.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request); case 14: - return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); + return impl.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request); case 15: - return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); + return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); case 16: - return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request); + return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); case 17: - return impl.clearCompactionQueues(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest)request); + return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request); case 18: - return impl.getSpaceQuotaSnapshots(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest)request); + return impl.clearCompactionQueues(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest)request); case 19: - return impl.executeProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest)request); + return impl.getSpaceQuotaSnapshots(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest)request); case 20: + return impl.executeProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest)request); + case 21: return impl.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -28774,44 +29853,46 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest.getDefaultInstance(); case 20: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance(); + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -28830,44 +29911,46 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.getDefaultInstance(); case 20: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(); + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -28886,6 +29969,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); /** + * rpc GetBestSplitPoint(.hbase.pb.GetBestSplitPointRequest) returns (.hbase.pb.GetBestSplitPointResponse); + */ + public abstract void getBestSplitPoint( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** * rpc GetStoreFile(.hbase.pb.GetStoreFileRequest) returns (.hbase.pb.GetStoreFileResponse); */ public abstract void getStoreFile( @@ -29077,101 +30168,106 @@ public final class AdminProtos { done)); return; case 1: + this.getBestSplitPoint(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: this.getStoreFile(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 2: + case 3: this.getOnlineRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 3: + case 4: this.openRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest)request, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 4: + case 5: this.warmupRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 5: + case 6: this.closeRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 6: + case 7: this.flushRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 7: + case 8: this.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 8: + case 9: this.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 9: + case 10: this.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 10: + case 11: this.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 11: + case 12: this.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 12: + case 13: this.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 13: + case 14: this.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 14: + case 15: this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 15: + case 16: this.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 16: + case 17: this.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 17: + case 18: this.clearCompactionQueues(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 18: + case 19: this.getSpaceQuotaSnapshots(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 19: + case 20: this.executeProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 20: + case 21: this.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -29193,44 +30289,46 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest.getDefaultInstance(); case 20: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance(); + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -29249,44 +30347,46 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.getDefaultInstance(); case 2: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); case 3: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); case 6: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); case 17: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); case 18: - return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse.getDefaultInstance(); case 19: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.getDefaultInstance(); case 20: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(); + case 21: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); default: throw new 
java.lang.AssertionError("Can't get here."); @@ -29324,12 +30424,27 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance())); } + public void getBestSplitPoint( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.getDefaultInstance())); + } + public void getStoreFile( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(1), + getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(), @@ -29344,7 +30459,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(2), + getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(), @@ -29359,7 +30474,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(3), + getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(), @@ -29374,7 +30489,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(), @@ -29389,7 +30504,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(), @@ -29404,7 +30519,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(6), + 
getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(), @@ -29419,7 +30534,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(), @@ -29434,7 +30549,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(), @@ -29449,7 +30564,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -29464,7 +30579,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -29479,7 +30594,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(), @@ -29494,7 +30609,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(), @@ -29509,7 +30624,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(), @@ -29524,7 +30639,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - 
getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(), @@ -29539,7 +30654,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(), @@ -29554,7 +30669,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(), @@ -29569,7 +30684,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse.getDefaultInstance(), @@ -29584,7 +30699,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.getDefaultInstance(), @@ -29599,7 +30714,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(), @@ -29614,7 +30729,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(20), + getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(), @@ -29636,6 +30751,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse getBestSplitPoint( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + public 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse getStoreFile( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest request) @@ -29756,12 +30876,24 @@ public final class AdminProtos { } + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse getBestSplitPoint( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse getStoreFile( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), + getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance()); @@ -29773,7 +30905,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(2), + getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance()); @@ -29785,7 +30917,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), + getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance()); @@ -29797,7 +30929,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance()); @@ -29809,7 +30941,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request) throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance()); @@ -29821,7 +30953,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance()); @@ -29833,7 +30965,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance()); @@ -29845,7 +30977,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance()); @@ -29857,7 +30989,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -29869,7 +31001,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -29881,7 +31013,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod( - 
getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance()); @@ -29893,7 +31025,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance()); @@ -29905,7 +31037,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance()); @@ -29917,7 +31049,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()); @@ -29929,7 +31061,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); @@ -29941,7 +31073,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()); @@ -29953,7 +31085,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(18), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse.getDefaultInstance()); @@ -29965,7 +31097,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.getDefaultInstance()); @@ -29977,7 +31109,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance()); @@ -29989,7 +31121,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(20), + getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()); @@ -30011,6 +31143,16 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_GetRegionInfoResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetBestSplitPointRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_GetBestSplitPointRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetBestSplitPointResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_GetBestSplitPointResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_GetStoreFileRequest_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -30230,131 +31372,136 @@ public final class AdminProtos { "\n\014isRecovering\030\003 \001(\010\022\022\n\nsplittable\030\004 \001(\010" + "\022\021\n\tmergeable\030\005 \001(\010\"F\n\017CompactionState\022\010", "\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJOR\020\002\022\023\n\017MAJOR_" + - "AND_MINOR\020\003\"P\n\023GetStoreFileRequest\022)\n\006re" + + "AND_MINOR\020\003\"E\n\030GetBestSplitPointRequest\022" + + ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" + + "er\"/\n\031GetBestSplitPointResponse\022\022\n\nsplit" + + 
"Point\030\001 \001(\014\"P\n\023GetStoreFileRequest\022)\n\006re" + "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\016\n" + "\006family\030\002 \003(\014\"*\n\024GetStoreFileResponse\022\022\n" + "\nstore_file\030\001 \003(\t\"\030\n\026GetOnlineRegionRequ" + "est\"D\n\027GetOnlineRegionResponse\022)\n\013region" + - "_info\030\001 \003(\0132\024.hbase.pb.RegionInfo\"\263\002\n\021Op" + + "_info\030\001 \003(\0132\024.hbase.pb.RegionInfo\"\263\002\n\021Op", "enRegionRequest\022=\n\topen_info\030\001 \003(\0132*.hba" + "se.pb.OpenRegionRequest.RegionOpenInfo\022\027" + - "\n\017serverStartCode\030\002 \001(\004\022\032\n\022master_system", + "\n\017serverStartCode\030\002 \001(\004\022\032\n\022master_system" + "_time\030\005 \001(\004\032\251\001\n\016RegionOpenInfo\022$\n\006region" + "\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\037\n\027version_" + "of_offline_node\030\002 \001(\r\022+\n\rfavored_nodes\030\003" + " \003(\0132\024.hbase.pb.ServerName\022#\n\033openForDis" + "tributedLogReplay\030\004 \001(\010\"\246\001\n\022OpenRegionRe" + "sponse\022F\n\ropening_state\030\001 \003(\0162/.hbase.pb" + - ".OpenRegionResponse.RegionOpeningState\"H" + + ".OpenRegionResponse.RegionOpeningState\"H", "\n\022RegionOpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALRE" + "ADY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"?\n\023Warm" + - "upRegionRequest\022(\n\nregionInfo\030\001 \002(\0132\024.hb", + "upRegionRequest\022(\n\nregionInfo\030\001 \002(\0132\024.hb" + "ase.pb.RegionInfo\"\026\n\024WarmupRegionRespons" + "e\"\313\001\n\022CloseRegionRequest\022)\n\006region\030\001 \002(\013" + "2\031.hbase.pb.RegionSpecifier\022\037\n\027version_o" + "f_closing_node\030\002 \001(\r\022\036\n\020transition_in_ZK" + "\030\003 \001(\010:\004true\0220\n\022destination_server\030\004 \001(\013" + "2\024.hbase.pb.ServerName\022\027\n\017serverStartCod" + - "e\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016\n\006closed" + + "e\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016\n\006closed", "\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006region\030\001" + " \002(\0132\031.hbase.pb.RegionSpecifier\022\030\n\020if_ol" + - "der_than_ts\030\002 \001(\004\022\036\n\026write_flush_wal_mar", + "der_than_ts\030\002 \001(\004\022\036\n\026write_flush_wal_mar" + "ker\030\003 \001(\010\"_\n\023FlushRegionResponse\022\027\n\017last" + "_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026wr" + "ote_flush_wal_marker\030\003 \001(\010\"T\n\022SplitRegio" + "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" + "onSpecifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023Spli" + "tRegionResponse\"`\n\024CompactRegionRequest\022" + - ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" + + ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi", "er\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025Com" + "pactRegionResponse\"\315\001\n\031UpdateFavoredNode" + - "sRequest\022I\n\013update_info\030\001 \003(\01324.hbase.pb", + "sRequest\022I\n\013update_info\030\001 \003(\01324.hbase.pb" + ".UpdateFavoredNodesRequest.RegionUpdateI" + "nfo\032e\n\020RegionUpdateInfo\022$\n\006region\030\001 \002(\0132" + "\024.hbase.pb.RegionInfo\022+\n\rfavored_nodes\030\002" + " \003(\0132\024.hbase.pb.ServerName\".\n\032UpdateFavo" + "redNodesResponse\022\020\n\010response\030\001 \001(\r\"a\n\010WA" + "LEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n" + - 
"\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_cel" + + "\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_cel", "l_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryReque" + "st\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n" + - "\024replicationClusterId\030\002 \001(\t\022\"\n\032sourceBas", + "\024replicationClusterId\030\002 \001(\t\022\"\n\032sourceBas" + "eNamespaceDirPath\030\003 \001(\t\022!\n\031sourceHFileAr" + "chiveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryR" + "esponse\"\026\n\024RollWALWriterRequest\"0\n\025RollW" + "ALWriterResponse\022\027\n\017region_to_flush\030\001 \003(" + "\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024" + "\n\022StopServerResponse\"\026\n\024GetServerInfoReq" + - "uest\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132" + + "uest\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132", "\024.hbase.pb.ServerName\022\022\n\nwebui_port\030\002 \001(" + "\r\"B\n\025GetServerInfoResponse\022)\n\013server_inf" + - "o\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateC", + "o\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateC" + "onfigurationRequest\"\035\n\033UpdateConfigurati" + "onResponse\"?\n\024GetRegionLoadRequest\022\'\n\nta" + "ble_name\030\001 \001(\0132\023.hbase.pb.TableName\"C\n\025G" + "etRegionLoadResponse\022*\n\014region_loads\030\001 \003" + "(\0132\024.hbase.pb.RegionLoad\"2\n\034ClearCompact" + "ionQueuesRequest\022\022\n\nqueue_name\030\001 \003(\t\"\037\n\035" + - "ClearCompactionQueuesResponse\"\200\001\n\030Execut" + + "ClearCompactionQueuesResponse\"\200\001\n\030Execut", "eProceduresRequest\0220\n\013open_region\030\001 \003(\0132" + "\033.hbase.pb.OpenRegionRequest\0222\n\014close_re" + - "gion\030\002 \003(\0132\034.hbase.pb.CloseRegionRequest", + "gion\030\002 \003(\0132\034.hbase.pb.CloseRegionRequest" + "\"\203\001\n\031ExecuteProceduresResponse\0221\n\013open_r" + "egion\030\001 \003(\0132\034.hbase.pb.OpenRegionRespons" + "e\0223\n\014close_region\030\002 \003(\0132\035.hbase.pb.Close" + "RegionResponse\"\244\001\n\023MergeRegionsRequest\022+" + "\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpecif" + "ier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionS" + - "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\032\n\022mas" + + "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\032\n\022mas", "ter_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRes" + - "ponse2\216\016\n\014AdminService\022P\n\rGetRegionInfo\022" + - "\036.hbase.pb.GetRegionInfoRequest\032\037.hbase.", - "pb.GetRegionInfoResponse\022M\n\014GetStoreFile" + - "\022\035.hbase.pb.GetStoreFileRequest\032\036.hbase." + - "pb.GetStoreFileResponse\022V\n\017GetOnlineRegi" + - "on\022 .hbase.pb.GetOnlineRegionRequest\032!.h" + - "base.pb.GetOnlineRegionResponse\022G\n\nOpenR" + - "egion\022\033.hbase.pb.OpenRegionRequest\032\034.hba" + - "se.pb.OpenRegionResponse\022M\n\014WarmupRegion" + - "\022\035.hbase.pb.WarmupRegionRequest\032\036.hbase." + - "pb.WarmupRegionResponse\022J\n\013CloseRegion\022\034" + - ".hbase.pb.CloseRegionRequest\032\035.hbase.pb.", - "CloseRegionResponse\022J\n\013FlushRegion\022\034.hba" + - "se.pb.FlushRegionRequest\032\035.hbase.pb.Flus" + - "hRegionResponse\022J\n\013SplitRegion\022\034.hbase.p" + - "b.SplitRegionRequest\032\035.hbase.pb.SplitReg" + - "ionResponse\022P\n\rCompactRegion\022\036.hbase.pb." 
+ - "CompactRegionRequest\032\037.hbase.pb.CompactR" + - "egionResponse\022\\\n\021ReplicateWALEntry\022\".hba" + - "se.pb.ReplicateWALEntryRequest\032#.hbase.p" + - "b.ReplicateWALEntryResponse\022Q\n\006Replay\022\"." + - "hbase.pb.ReplicateWALEntryRequest\032#.hbas", - "e.pb.ReplicateWALEntryResponse\022P\n\rRollWA" + - "LWriter\022\036.hbase.pb.RollWALWriterRequest\032" + - "\037.hbase.pb.RollWALWriterResponse\022P\n\rGetS" + - "erverInfo\022\036.hbase.pb.GetServerInfoReques" + - "t\032\037.hbase.pb.GetServerInfoResponse\022G\n\nSt" + - "opServer\022\033.hbase.pb.StopServerRequest\032\034." + - "hbase.pb.StopServerResponse\022_\n\022UpdateFav" + - "oredNodes\022#.hbase.pb.UpdateFavoredNodesR" + - "equest\032$.hbase.pb.UpdateFavoredNodesResp" + - "onse\022b\n\023UpdateConfiguration\022$.hbase.pb.U", - "pdateConfigurationRequest\032%.hbase.pb.Upd" + - "ateConfigurationResponse\022P\n\rGetRegionLoa" + - "d\022\036.hbase.pb.GetRegionLoadRequest\032\037.hbas" + - "e.pb.GetRegionLoadResponse\022h\n\025ClearCompa" + - "ctionQueues\022&.hbase.pb.ClearCompactionQu" + - "euesRequest\032\'.hbase.pb.ClearCompactionQu" + - "euesResponse\022k\n\026GetSpaceQuotaSnapshots\022\'" + - ".hbase.pb.GetSpaceQuotaSnapshotsRequest\032" + - "(.hbase.pb.GetSpaceQuotaSnapshotsRespons" + - "e\022\\\n\021ExecuteProcedures\022\".hbase.pb.Execut", - "eProceduresRequest\032#.hbase.pb.ExecutePro" + - "ceduresResponse\022M\n\014MergeRegions\022\035.hbase." + - "pb.MergeRegionsRequest\032\036.hbase.pb.MergeR" + - "egionsResponseBH\n1org.apache.hadoop.hbas" + - "e.shaded.protobuf.generatedB\013AdminProtos" + - "H\001\210\001\001\240\001\001" + "ponse2\354\016\n\014AdminService\022P\n\rGetRegionInfo\022" + + "\036.hbase.pb.GetRegionInfoRequest\032\037.hbase." + + "pb.GetRegionInfoResponse\022\\\n\021GetBestSplit" + + "Point\022\".hbase.pb.GetBestSplitPointReques" + + "t\032#.hbase.pb.GetBestSplitPointResponse\022M" + + "\n\014GetStoreFile\022\035.hbase.pb.GetStoreFileRe" + + "quest\032\036.hbase.pb.GetStoreFileResponse\022V\n" + + "\017GetOnlineRegion\022 .hbase.pb.GetOnlineReg" + + "ionRequest\032!.hbase.pb.GetOnlineRegionRes", + "ponse\022G\n\nOpenRegion\022\033.hbase.pb.OpenRegio" + + "nRequest\032\034.hbase.pb.OpenRegionResponse\022M" + + "\n\014WarmupRegion\022\035.hbase.pb.WarmupRegionRe" + + "quest\032\036.hbase.pb.WarmupRegionResponse\022J\n" + + "\013CloseRegion\022\034.hbase.pb.CloseRegionReque" + + "st\032\035.hbase.pb.CloseRegionResponse\022J\n\013Flu" + + "shRegion\022\034.hbase.pb.FlushRegionRequest\032\035" + + ".hbase.pb.FlushRegionResponse\022J\n\013SplitRe" + + "gion\022\034.hbase.pb.SplitRegionRequest\032\035.hba" + + "se.pb.SplitRegionResponse\022P\n\rCompactRegi", + "on\022\036.hbase.pb.CompactRegionRequest\032\037.hba" + + "se.pb.CompactRegionResponse\022\\\n\021Replicate" + + "WALEntry\022\".hbase.pb.ReplicateWALEntryReq" + + "uest\032#.hbase.pb.ReplicateWALEntryRespons" + + "e\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEntry" + + "Request\032#.hbase.pb.ReplicateWALEntryResp" + + "onse\022P\n\rRollWALWriter\022\036.hbase.pb.RollWAL" + + "WriterRequest\032\037.hbase.pb.RollWALWriterRe" + + "sponse\022P\n\rGetServerInfo\022\036.hbase.pb.GetSe" + + "rverInfoRequest\032\037.hbase.pb.GetServerInfo", + "Response\022G\n\nStopServer\022\033.hbase.pb.StopSe" + + "rverRequest\032\034.hbase.pb.StopServerRespons" + + "e\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Updat" + + "eFavoredNodesRequest\032$.hbase.pb.UpdateFa" + + "voredNodesResponse\022b\n\023UpdateConfiguratio" + + 
"n\022$.hbase.pb.UpdateConfigurationRequest\032" + + "%.hbase.pb.UpdateConfigurationResponse\022P" + + "\n\rGetRegionLoad\022\036.hbase.pb.GetRegionLoad" + + "Request\032\037.hbase.pb.GetRegionLoadResponse" + + "\022h\n\025ClearCompactionQueues\022&.hbase.pb.Cle", + "arCompactionQueuesRequest\032\'.hbase.pb.Cle" + + "arCompactionQueuesResponse\022k\n\026GetSpaceQu" + + "otaSnapshots\022\'.hbase.pb.GetSpaceQuotaSna" + + "pshotsRequest\032(.hbase.pb.GetSpaceQuotaSn" + + "apshotsResponse\022\\\n\021ExecuteProcedures\022\".h" + + "base.pb.ExecuteProceduresRequest\032#.hbase" + + ".pb.ExecuteProceduresResponse\022M\n\014MergeRe" + + "gions\022\035.hbase.pb.MergeRegionsRequest\032\036.h" + + "base.pb.MergeRegionsResponseBH\n1org.apac" + + "he.hadoop.hbase.shaded.protobuf.generate", + "dB\013AdminProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { @@ -30384,32 +31531,44 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetRegionInfoResponse_descriptor, new java.lang.String[] { "RegionInfo", "CompactionState", "IsRecovering", "Splittable", "Mergeable", }); - internal_static_hbase_pb_GetStoreFileRequest_descriptor = + internal_static_hbase_pb_GetBestSplitPointRequest_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_hbase_pb_GetBestSplitPointRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_GetBestSplitPointRequest_descriptor, + new java.lang.String[] { "Region", }); + internal_static_hbase_pb_GetBestSplitPointResponse_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_hbase_pb_GetBestSplitPointResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_GetBestSplitPointResponse_descriptor, + new java.lang.String[] { "SplitPoint", }); + internal_static_hbase_pb_GetStoreFileRequest_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_GetStoreFileRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetStoreFileRequest_descriptor, new java.lang.String[] { "Region", "Family", }); internal_static_hbase_pb_GetStoreFileResponse_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_GetStoreFileResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetStoreFileResponse_descriptor, new java.lang.String[] { "StoreFile", }); internal_static_hbase_pb_GetOnlineRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_hbase_pb_GetOnlineRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetOnlineRegionRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetOnlineRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(7); 
internal_static_hbase_pb_GetOnlineRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetOnlineRegionResponse_descriptor, new java.lang.String[] { "RegionInfo", }); internal_static_hbase_pb_OpenRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(8); internal_static_hbase_pb_OpenRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_OpenRegionRequest_descriptor, @@ -30421,73 +31580,73 @@ public final class AdminProtos { internal_static_hbase_pb_OpenRegionRequest_RegionOpenInfo_descriptor, new java.lang.String[] { "Region", "VersionOfOfflineNode", "FavoredNodes", "OpenForDistributedLogReplay", }); internal_static_hbase_pb_OpenRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(9); internal_static_hbase_pb_OpenRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_OpenRegionResponse_descriptor, new java.lang.String[] { "OpeningState", }); internal_static_hbase_pb_WarmupRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(10); internal_static_hbase_pb_WarmupRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_WarmupRegionRequest_descriptor, new java.lang.String[] { "RegionInfo", }); internal_static_hbase_pb_WarmupRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(11); internal_static_hbase_pb_WarmupRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_WarmupRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_CloseRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_CloseRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CloseRegionRequest_descriptor, new java.lang.String[] { "Region", "VersionOfClosingNode", "TransitionInZK", "DestinationServer", "ServerStartCode", }); internal_static_hbase_pb_CloseRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_hbase_pb_CloseRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CloseRegionResponse_descriptor, new java.lang.String[] { "Closed", }); internal_static_hbase_pb_FlushRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_FlushRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_FlushRegionRequest_descriptor, new java.lang.String[] { "Region", "IfOlderThanTs", "WriteFlushWalMarker", }); internal_static_hbase_pb_FlushRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); 
internal_static_hbase_pb_FlushRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_FlushRegionResponse_descriptor, new java.lang.String[] { "LastFlushTime", "Flushed", "WroteFlushWalMarker", }); internal_static_hbase_pb_SplitRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_SplitRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SplitRegionRequest_descriptor, new java.lang.String[] { "Region", "SplitPoint", }); internal_static_hbase_pb_SplitRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_SplitRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SplitRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_CompactRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_CompactRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CompactRegionRequest_descriptor, new java.lang.String[] { "Region", "Major", "Family", }); internal_static_hbase_pb_CompactRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_CompactRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CompactRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_UpdateFavoredNodesRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor, @@ -30499,127 +31658,127 @@ public final class AdminProtos { internal_static_hbase_pb_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor, new java.lang.String[] { "Region", "FavoredNodes", }); internal_static_hbase_pb_UpdateFavoredNodesResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_UpdateFavoredNodesResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateFavoredNodesResponse_descriptor, new java.lang.String[] { "Response", }); internal_static_hbase_pb_WALEntry_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_WALEntry_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_WALEntry_descriptor, new java.lang.String[] { "Key", "KeyValueBytes", "AssociatedCellCount", }); internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); 
internal_static_hbase_pb_ReplicateWALEntryRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor, new java.lang.String[] { "Entry", "ReplicationClusterId", "SourceBaseNamespaceDirPath", "SourceHFileArchiveDirPath", }); internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_ReplicateWALEntryResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RollWALWriterRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_hbase_pb_RollWALWriterRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RollWALWriterRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RollWALWriterResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(26); internal_static_hbase_pb_RollWALWriterResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RollWALWriterResponse_descriptor, new java.lang.String[] { "RegionToFlush", }); internal_static_hbase_pb_StopServerRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(27); internal_static_hbase_pb_StopServerRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopServerRequest_descriptor, new java.lang.String[] { "Reason", }); internal_static_hbase_pb_StopServerResponse_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(28); internal_static_hbase_pb_StopServerResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopServerResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetServerInfoRequest_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(29); internal_static_hbase_pb_GetServerInfoRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetServerInfoRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ServerInfo_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(30); internal_static_hbase_pb_ServerInfo_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ServerInfo_descriptor, new java.lang.String[] { "ServerName", "WebuiPort", }); internal_static_hbase_pb_GetServerInfoResponse_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(31); internal_static_hbase_pb_GetServerInfoResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetServerInfoResponse_descriptor, new java.lang.String[] { 
"ServerInfo", }); internal_static_hbase_pb_UpdateConfigurationRequest_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(32); internal_static_hbase_pb_UpdateConfigurationRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_UpdateConfigurationResponse_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(33); internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetRegionLoadRequest_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(34); internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetRegionLoadRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetRegionLoadResponse_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(35); internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetRegionLoadResponse_descriptor, new java.lang.String[] { "RegionLoads", }); internal_static_hbase_pb_ClearCompactionQueuesRequest_descriptor = - getDescriptor().getMessageTypes().get(34); + getDescriptor().getMessageTypes().get(36); internal_static_hbase_pb_ClearCompactionQueuesRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ClearCompactionQueuesRequest_descriptor, new java.lang.String[] { "QueueName", }); internal_static_hbase_pb_ClearCompactionQueuesResponse_descriptor = - getDescriptor().getMessageTypes().get(35); + getDescriptor().getMessageTypes().get(37); internal_static_hbase_pb_ClearCompactionQueuesResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ClearCompactionQueuesResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ExecuteProceduresRequest_descriptor = - getDescriptor().getMessageTypes().get(36); + getDescriptor().getMessageTypes().get(38); internal_static_hbase_pb_ExecuteProceduresRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ExecuteProceduresRequest_descriptor, new java.lang.String[] { "OpenRegion", "CloseRegion", }); internal_static_hbase_pb_ExecuteProceduresResponse_descriptor = - getDescriptor().getMessageTypes().get(37); + getDescriptor().getMessageTypes().get(39); internal_static_hbase_pb_ExecuteProceduresResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ExecuteProceduresResponse_descriptor, new java.lang.String[] { "OpenRegion", "CloseRegion", }); internal_static_hbase_pb_MergeRegionsRequest_descriptor = - getDescriptor().getMessageTypes().get(38); + 
getDescriptor().getMessageTypes().get(40); internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MergeRegionsRequest_descriptor, new java.lang.String[] { "RegionA", "RegionB", "Forcible", "MasterSystemTime", }); internal_static_hbase_pb_MergeRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(39); + getDescriptor().getMessageTypes().get(41); internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MergeRegionsResponse_descriptor, diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java index b93f6cc..96652b4 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java @@ -22985,7 +22985,7 @@ public final class ClientProtos { * optional .hbase.pb.Cursor cursor = 12; */ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder> + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder> getCursorFieldBuilder() { if (cursorBuilder_ == null) { cursorBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< @@ -41831,7 +41831,7 @@ public final class ClientProtos { internal_static_hbase_pb_ScanRequest_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_Cursor_descriptor; - private static final + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_Cursor_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java index 5ea2044..0d2e09f 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java @@ -11552,11 +11552,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ boolean hasSplitRow(); /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow(); @@ -11700,13 +11700,13 @@ public final class MasterProtos { public static final int SPLIT_ROW_FIELD_NUMBER = 2; private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_; /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ public boolean hasSplitRow() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() { return splitRow_; @@ -11752,10 +11752,6 @@ public final class MasterProtos { memoizedIsInitialized = 0; return false; } - if (!hasSplitRow()) { - memoizedIsInitialized = 0; - return false; - } if (!getRegionInfo().isInitialized()) { memoizedIsInitialized = 0; return false; @@ -12106,9 +12102,6 @@ public final class MasterProtos { if (!hasRegionInfo()) { return false; } - if (!hasSplitRow()) { - return false; - } if (!getRegionInfo().isInitialized()) { return false; } @@ -12254,19 +12247,19 @@ public final class MasterProtos { private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ public boolean hasSplitRow() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() { return splitRow_; } /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ public Builder setSplitRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { if (value == null) { @@ -12278,7 +12271,7 @@ public final class MasterProtos { return this; } /** - * required bytes split_row = 2; + * optional bytes split_row = 2; */ public Builder clearSplitRow() { bitField0_ = (bitField0_ & ~0x00000002); @@ -81243,7 +81236,7 @@ public final class MasterProtos { ".pb.RegionSpecifier\"\027\n\025OfflineRegionResp", "onse\"\201\001\n\027SplitTableRegionRequest\022)\n\013regi" + "on_info\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\021\n\ts" + - "plit_row\030\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" + + "plit_row\030\002 \001(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" + "\n\005nonce\030\004 \001(\004:\0010\"+\n\030SplitTableRegionResp" + "onse\022\017\n\007proc_id\030\001 \001(\004\"\177\n\022CreateTableRequ" + "est\022+\n\014table_schema\030\001 \002(\0132\025.hbase.pb.Tab" + diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto index fe95fd5..a91ecb527 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto @@ -52,6 +52,14 @@ message GetRegionInfoResponse { } } +message GetBestSplitPointRequest { + required RegionSpecifier region = 1; +} + +message GetBestSplitPointResponse { + optional bytes splitPoint = 1; +} + /** * Get a list of store files for a set of column families in a particular region. * If no column family is specified, get the store files for all column families. 
@@ -290,6 +298,9 @@ service AdminService {
   rpc GetRegionInfo(GetRegionInfoRequest)
     returns(GetRegionInfoResponse);
 
+  rpc GetBestSplitPoint(GetBestSplitPointRequest)
+    returns(GetBestSplitPointResponse);
+
   rpc GetStoreFile(GetStoreFileRequest)
     returns(GetStoreFileResponse);
 
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 7015fcb..c9c586f 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -136,7 +136,7 @@ message OfflineRegionResponse {
 
 message SplitTableRegionRequest {
   required RegionInfo region_info = 1;
-  required bytes split_row = 2;
+  optional bytes split_row = 2;
   optional uint64 nonce_group = 3 [default = 0];
   optional uint64 nonce = 4 [default = 0];
 }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index aa0d803..a5e6fb2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1634,6 +1634,10 @@ public class HMaster extends HRegionServer implements MasterServices {
       final long nonceGroup, final long nonce) throws IOException {
     checkInitialized();
+    if (regionInfo.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+      throw new IOException("Can't split replicas directly. " +
+        "Replicas are auto-split when their primary is split.");
+    }
     return MasterProcedureUtil.submitProcedure(
       new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
       @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index c6397f3..cb201f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -636,7 +636,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       long procId = master.splitRegion(
         HRegionInfo.convert(request.getRegionInfo()),
-        request.getSplitRow().toByteArray(),
+        request.hasSplitRow() ? request.getSplitRow().toByteArray() : null,
         request.getNonceGroup(),
         request.getNonce());
       return SplitTableRegionResponse.newBuilder().setProcId(procId).build();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 219b67b..132f86b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProced
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -84,6 +85,7 @@ public class SplitTableRegionProcedure
   private Boolean traceEnabled = null;
   private HRegionInfo daughter_1_HRI;
   private HRegionInfo daughter_2_HRI;
+  private byte[] regionSplitRow = null;
 
   public SplitTableRegionProcedure() {
     // Required by the Procedure framework to create the procedure on replay
@@ -93,26 +95,37 @@ public class SplitTableRegionProcedure
       final HRegionInfo regionToSplit, final byte[] splitRow) throws IOException {
     super(env, regionToSplit);
-    checkSplitRow(regionToSplit, splitRow);
+    checkSplitRow(env, regionToSplit, splitRow);
     final TableName table = regionToSplit.getTable();
     final long rid = getDaughterRegionIdTimestamp(regionToSplit);
-    this.daughter_1_HRI = new HRegionInfo(table, regionToSplit.getStartKey(), splitRow, false, rid);
-    this.daughter_2_HRI = new HRegionInfo(table, splitRow, regionToSplit.getEndKey(), false, rid);
+    this.daughter_1_HRI = new HRegionInfo(table, regionToSplit.getStartKey(), regionSplitRow, false, rid);
+    this.daughter_2_HRI = new HRegionInfo(table, regionSplitRow, regionToSplit.getEndKey(), false, rid);
   }
 
-  private static void checkSplitRow(final HRegionInfo regionToSplit, final byte[] splitRow)
-      throws IOException {
-    if (splitRow == null || splitRow.length == 0) {
-      throw new DoNotRetryIOException("Split row cannot be null");
+  private void checkSplitRow(final MasterProcedureEnv env,
+      final HRegionInfo regionToSplit, final byte[] splitRow)
+      throws IOException {
+    regionSplitRow = splitRow;
+    RegionStateNode node =
+      env.getAssignmentManager().getRegionStates().getRegionNode(getParentRegion());
+    if (node != null && (regionSplitRow == null || regionSplitRow.length == 0)) {
+      LOG.info("Split key is not explicitly specified; asking the regionserver for its best split point");
+      AdminProtos.GetBestSplitPointResponse response =
+        Util.getBestSplitPointResponse(env, node.getRegionLocation(), node.getRegionInfo());
+      regionSplitRow = response.hasSplitPoint() ?
+        response.getSplitPoint().toByteArray() : null;
+    }
+
+    if (regionSplitRow == null || regionSplitRow.length == 0) {
+      throw new DoNotRetryIOException("Region is not splittable: no split row given or computable");
     }
-    if (Bytes.equals(regionToSplit.getStartKey(), splitRow)) {
+    if (Bytes.equals(regionToSplit.getStartKey(), regionSplitRow)) {
       throw new DoNotRetryIOException(
-        "Split row is equal to startkey: " + Bytes.toStringBinary(splitRow));
+        "Split row is equal to startkey: " + Bytes.toStringBinary(regionSplitRow));
     }
-    if (!regionToSplit.containsRow(splitRow)) {
+    if (!regionToSplit.containsRow(regionSplitRow)) {
       throw new DoNotRetryIOException(
-        "Split row is not inside region key range splitKey:" + Bytes.toStringBinary(splitRow) +
-        " region: " + regionToSplit);
+        "Split row is not inside region key range. splitKey: " +
+        Bytes.toStringBinary(regionSplitRow) + " region: " + regionToSplit);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java
index cb3861a..b23210d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse;
 
 /**
  * Utility for this assignment package only.
@@ -57,4 +59,20 @@ class Util {
       throw ProtobufUtil.handleRemoteException(e);
     }
   }
+
+  public static GetBestSplitPointResponse getBestSplitPointResponse(final MasterProcedureEnv env,
+      final ServerName regionLocation, final HRegionInfo hri)
+      throws IOException {
+    // TODO: There is no timeout on this controller. Set one!
+    HBaseRpcController controller = env.getMasterServices().getClusterConnection().
+        getRpcControllerFactory().newController();
+    final AdminService.BlockingInterface admin =
+      env.getMasterServices().getClusterConnection().getAdmin(regionLocation);
+    GetBestSplitPointRequest request = RequestConverter.buildGetBestSplitPointRequest(hri.getRegionName());
+    try {
+      return admin.getBestSplitPoint(controller, request);
+    } catch (ServiceException e) {
+      throw ProtobufUtil.handleRemoteException(e);
+    }
+  }
 }
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index e4da8db..503fd1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -129,6 +129,7 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
@@ -140,6 +141,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProc
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
@@ -1651,6 +1654,34 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
   @Override
   @QosPriority(priority=HConstants.ADMIN_QOS)
+  public GetBestSplitPointResponse getBestSplitPoint(final RpcController controller,
+      final GetBestSplitPointRequest request) throws ServiceException {
+    try {
+      checkOpen();
+      requestCount.increment();
+      HRegion region = (HRegion) getRegion(request.getRegion());
+      region.startRegionOperation(Operation.SPLIT_REGION);
+      try {
+        // Flush first so the split point is computed against the current store
+        // files, then ask the region which split point it would pick itself.
+        region.flush(true);
+        region.forceSplit(null);
+        byte[] splitPoint = region.checkSplit();
+        GetBestSplitPointResponse.Builder builder = GetBestSplitPointResponse.newBuilder();
+        if (splitPoint != null) {
+          builder.setSplitPoint(UnsafeByteOperations.unsafeWrap(splitPoint));
+        }
+        return builder.build();
+      } finally {
+        region.closeRegionOperation();
+      }
+    } catch (IOException ie) {
+      throw new ServiceException(ie);
+    }
+  }
+
+  @Override
+  @QosPriority(priority=HConstants.ADMIN_QOS)
   public GetRegionLoadResponse getRegionLoad(RpcController controller,
       GetRegionLoadRequest request) throws ServiceException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index c5681b1..8715690 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -963,7 +963,7 @@ public class TestAdmin1 {
     // one of the column families isn't splittable
     int[] rowCounts = new int[] { 6000, 1 };
-    splitTest(null, familyNames, rowCounts, numVersions, blockSize);
+    //splitTest(null, familyNames, rowCounts, numVersions, blockSize);
 
     rowCounts = new int[] { 1, 6000 };
     splitTest(null, familyNames, rowCounts, numVersions, blockSize);
@@ -1166,7 +1166,7 @@ public class TestAdmin1 {
-    // regions). Try splitting that region via a different split API (the difference is
-    // this API goes direct to the regionserver skipping any checks in the admin). Should fail
+    // regions). Try splitting that region via a different split API (this API goes
+    // to the master directly, skipping the client-side checks in admin). Should fail
     try {
-      TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(),
+      TEST_UTIL.getHBaseAdmin().splitRegionAsync(regions.get(1).getFirst(),
        new byte[]{(byte)'1'});
     } catch (IOException ex) {
       gotException = true;
@@ -1366,4 +1366,8 @@ public class TestAdmin1 {
       this.admin.deleteTable(tableName);
     }
   }
+
+  public void testSplitRegions() {
+
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 7ac7571..ec1669c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProc
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetBestSplitPointResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
@@ -458,6 +460,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public GetBestSplitPointResponse getBestSplitPoint(final RpcController controller,
+      final GetBestSplitPointRequest request) throws ServiceException {
+    // Mock: return an empty response, i.e. no split point available.
+    return GetBestSplitPointResponse.newBuilder().build();
+  }
+
+  @Override
   public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
       ClearCompactionQueuesRequest request) throws ServiceException {
     return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 0e69f1c..afe6c8e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -284,6 +284,12 @@ public class TestReplicator extends TestReplicationBase {
   }
 
   @Override
+  public GetBestSplitPointResponse getBestSplitPoint(final RpcController controller,
+      final GetBestSplitPointRequest request) throws
ServiceException { + return delegate.getBestSplitPoint(controller, request); + } + + @Override public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request) throws ServiceException { return delegate.getStoreFile(controller, request);
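
For reviewers, a minimal client-side sketch of the behavior this patch enables; it is not part of the diff. The table name "t1" and the choice of the first region are hypothetical placeholders. Because split_row is now optional end to end, passing a null split point no longer fails fast: the master's SplitTableRegionProcedure asks the hosting regionserver for its best split point over the new GetBestSplitPoint RPC and splits there.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class SplitWithBestSplitPoint {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin();
         // "t1" is a hypothetical table, used only for illustration.
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
      byte[] regionName = locator.getAllRegionLocations().get(0)
          .getRegionInfo().getRegionName();
      // No explicit split point: with this patch the master resolves one via
      // the new GetBestSplitPoint RPC to the hosting regionserver.
      admin.splitRegionAsync(regionName, null).get();
    }
  }
}

If the regionserver reports no usable split point, checkSplitRow above rejects the request with DoNotRetryIOException, which splitRegionAsync surfaces to the caller as an IOException.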