diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f2fc9a5..9a8cfff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -24,6 +24,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.Future;
 import java.util.regex.Pattern;
@@ -1996,4 +1997,14 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    */
  void disableTableReplication(final TableName tableName) throws IOException;
+
+  /**
+   * Clear the compaction queues on a region server.
+   * @param sn the region server name
+   * @param queues the set of queue names to clear
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException if interrupted while issuing the request
+   */
+  void clearCompactQueues(final ServerName sn, final Set<String> queues)
+      throws IOException, InterruptedException;
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index cadd6cc..d699712 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -4287,4 +4288,22 @@ public class HBaseAdmin implements Admin {
     return otherConf;
   }
+
+  @Override
+  public void clearCompactQueues(final ServerName sn, final Set<String> queues)
+      throws IOException, InterruptedException {
+    final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+    Callable<Void> callable = new Callable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        // TODO: There is no timeout on this controller. Set one!
+        HBaseRpcController controller = rpcControllerFactory.newController();
+        ClearCompactQueuesRequest request =
+            RequestConverter.buildClearQueuesRequest(queues);
+        admin.clearCompactQueues(controller, request);
+        return null;
+      }
+    };
+    ProtobufUtil.call(callable);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index a513d66..c92e6a7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 import java.util.regex.Pattern;

 import org.apache.hadoop.hbase.CellScannable;
@@ -48,8 +49,10 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -1695,4 +1698,15 @@ public final class RequestConverter {
     builder.setNamespaceName(name);
     return builder.build();
   }
+
+  /**
+   * Create a protocol buffer ClearCompactQueuesRequest for the given set of queue names
+   */
+  public static ClearCompactQueuesRequest buildClearQueuesRequest(Set<String> queues) {
+    ClearCompactQueuesRequest.Builder builder = ClearCompactQueuesRequest.newBuilder();
+    for (String name : queues) {
+      builder.addQueueName(name);
+    }
+    return builder.build();
+  }
 }
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 703de38..1c4d638 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -19532,6 +19532,7 @@ public final class AdminProtos {
     }
     /**
      * <pre>
+   *
    * Roll request responses no longer include regions to flush
    * this list will always be empty when talking to a 1.0 server
     * </pre>
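
For orientation: the hunks above add a client-facing Admin#clearCompactQueues(ServerName, Set<String>) API, implemented in HBaseAdmin over the new ClearCompactQueues RPC. A minimal usage sketch follows; the connection setup, the server name, and the queue names ("long" and "short") are illustrative assumptions, not values this patch defines.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClearCompactQueuesUsage {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Hypothetical region server; substitute a live server from the cluster.
      ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1490000000000L);
      // Assumed queue names; the patch itself does not enumerate valid names.
      Set<String> queues = new HashSet<>();
      queues.add("long");
      queues.add("short");
      admin.clearCompactQueues(sn, queues);
    }
  }
}
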
@@ -19797,6 +19798,7 @@ public final class AdminProtos {
       }
       /**
        * <pre>
+     *
      * Roll request responses no longer include regions to flush
      * this list will always be empty when talking to a 1.0 server
       * </pre>
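
The large hunk below is protoc output for the two new messages: ClearCompactQueuesRequest carries a single repeated string field, queue_name = 1, and ClearCompactQueuesResponse is empty. As a sanity check of the generated builder and parser API (a sketch, not part of the patch):

import java.util.Arrays;

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest;

public class ClearCompactQueuesRequestRoundTrip {
  public static void main(String[] args) throws Exception {
    // queue_name is a repeated string field, so the builder exposes add/addAll.
    ClearCompactQueuesRequest request = ClearCompactQueuesRequest.newBuilder()
        .addAllQueueName(Arrays.asList("long", "short"))
        .build();

    // Round-trip through the wire format and read the field back.
    byte[] wire = request.toByteArray();
    ClearCompactQueuesRequest parsed = ClearCompactQueuesRequest.parseFrom(wire);
    System.out.println(parsed.getQueueNameCount());  // 2
    System.out.println(parsed.getQueueNameList());   // [long, short]
  }
}
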
@@ -24750,138 +24752,1075 @@ public final class AdminProtos { } + public interface ClearCompactQueuesRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ClearCompactQueuesRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated string queue_name = 1; + */ + java.util.List + getQueueNameList(); + /** + * repeated string queue_name = 1; + */ + int getQueueNameCount(); + /** + * repeated string queue_name = 1; + */ + java.lang.String getQueueName(int index); + /** + * repeated string queue_name = 1; + */ + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getQueueNameBytes(int index); + } /** - * Protobuf service {@code hbase.pb.AdminService} + * Protobuf type {@code hbase.pb.ClearCompactQueuesRequest} */ - public static abstract class AdminService - implements org.apache.hadoop.hbase.shaded.com.google.protobuf.Service { - protected AdminService() {} - - public interface Interface { - /** - * rpc GetRegionInfo(.hbase.pb.GetRegionInfoRequest) returns (.hbase.pb.GetRegionInfoResponse); - */ - public abstract void getRegionInfo( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); - - /** - * rpc GetStoreFile(.hbase.pb.GetStoreFileRequest) returns (.hbase.pb.GetStoreFileResponse); - */ - public abstract void getStoreFile( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); - - /** - * rpc GetOnlineRegion(.hbase.pb.GetOnlineRegionRequest) returns (.hbase.pb.GetOnlineRegionResponse); - */ - public abstract void getOnlineRegion( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); - - /** - * rpc OpenRegion(.hbase.pb.OpenRegionRequest) returns (.hbase.pb.OpenRegionResponse); - */ - public abstract void openRegion( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final class ClearCompactQueuesRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ClearCompactQueuesRequest) + ClearCompactQueuesRequestOrBuilder { + // Use ClearCompactQueuesRequest.newBuilder() to construct. 
+ private ClearCompactQueuesRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClearCompactQueuesRequest() { + queueName_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY; + } - /** - * rpc WarmupRegion(.hbase.pb.WarmupRegionRequest) returns (.hbase.pb.WarmupRegionResponse); - */ - public abstract void warmupRegion( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClearCompactQueuesRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + queueName_.add(bs); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = queueName_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesRequest_descriptor; + } - /** - * rpc CloseRegion(.hbase.pb.CloseRegionRequest) returns (.hbase.pb.CloseRegionResponse); - */ - public abstract void closeRegion( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.class, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.Builder.class); + } - /** - * rpc CloseRegionForSplitOrMerge(.hbase.pb.CloseRegionForSplitOrMergeRequest) returns (.hbase.pb.CloseRegionForSplitOrMergeResponse); - */ - public abstract void closeRegionForSplitOrMerge( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final int QUEUE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringList queueName_; + /** + * repeated string queue_name = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolStringList + getQueueNameList() { + return queueName_; + } + /** + * repeated string queue_name = 1; + */ + public int getQueueNameCount() { + return queueName_.size(); + } + /** + * repeated string queue_name = 1; + */ + public java.lang.String getQueueName(int index) { + return queueName_.get(index); + } + /** + * repeated string queue_name = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getQueueNameBytes(int index) { + return queueName_.getByteString(index); + } - /** - * rpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse); - */ - public abstract void flushRegion( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; - /** - * rpc SplitRegion(.hbase.pb.SplitRegionRequest) returns (.hbase.pb.SplitRegionResponse); - */ - public abstract void splitRegion( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + memoizedIsInitialized = 1; + return true; + } - /** - * rpc CompactRegion(.hbase.pb.CompactRegionRequest) returns (.hbase.pb.CompactRegionResponse); - */ - public abstract void compactRegion( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < queueName_.size(); i++) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, queueName_.getRaw(i)); + } + unknownFields.writeTo(output); + } - /** - * rpc ReplicateWALEntry(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse); - */ - public abstract void replicateWALEntry( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public int getSerializedSize() { + int size = memoizedSize; + if 
(size != -1) return size; - /** - * rpc Replay(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse); - */ - public abstract void replay( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + size = 0; + { + int dataSize = 0; + for (int i = 0; i < queueName_.size(); i++) { + dataSize += computeStringSizeNoTag(queueName_.getRaw(i)); + } + size += dataSize; + size += 1 * getQueueNameList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } - /** - * rpc RollWALWriter(.hbase.pb.RollWALWriterRequest) returns (.hbase.pb.RollWALWriterResponse); - */ - public abstract void rollWALWriter( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest) obj; - /** - * rpc GetServerInfo(.hbase.pb.GetServerInfoRequest) returns (.hbase.pb.GetServerInfoResponse); - */ - public abstract void getServerInfo( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + boolean result = true; + result = result && getQueueNameList() + .equals(other.getQueueNameList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } - /** - * rpc StopServer(.hbase.pb.StopServerRequest) returns (.hbase.pb.StopServerResponse); - */ - public abstract void stopServer( - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request, - org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getQueueNameCount() > 0) { + hash = (37 * hash) + QUEUE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getQueueNameList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } - /** - * rpc UpdateFavoredNodes(.hbase.pb.UpdateFavoredNodesRequest) returns (.hbase.pb.UpdateFavoredNodesResponse); - */ - public abstract void updateFavoredNodes( + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder 
newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ClearCompactQueuesRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ClearCompactQueuesRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + queueName_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest result = new 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = queueName_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.queueName_ = queueName_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.getDefaultInstance()) return this; + if (!other.queueName_.isEmpty()) { + if (queueName_.isEmpty()) { + queueName_ = other.queueName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureQueueNameIsMutable(); + queueName_.addAll(other.queueName_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringList queueName_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureQueueNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList(queueName_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string queue_name = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolStringList + getQueueNameList() { + return queueName_.getUnmodifiableView(); + } + /** + * repeated string queue_name = 1; + */ + public int getQueueNameCount() { + return queueName_.size(); + } + /** + * repeated string queue_name = 1; + */ + public java.lang.String getQueueName(int index) { + return queueName_.get(index); + } + /** + * repeated string queue_name = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getQueueNameBytes(int index) { + return queueName_.getByteString(index); + } + /** + * repeated string queue_name = 1; + */ + public Builder setQueueName( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueueNameIsMutable(); + queueName_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder addQueueName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueueNameIsMutable(); + queueName_.add(value); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder addAllQueueName( + java.lang.Iterable values) { + ensureQueueNameIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, queueName_); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder clearQueueName() { + queueName_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder addQueueNameBytes( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueueNameIsMutable(); + queueName_.add(value); + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearCompactQueuesRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ClearCompactQueuesRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ClearCompactQueuesRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ClearCompactQueuesRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface ClearCompactQueuesResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ClearCompactQueuesResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ClearCompactQueuesResponse} + */ + public static final class ClearCompactQueuesResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ClearCompactQueuesResponse) + ClearCompactQueuesResponseOrBuilder { + // Use ClearCompactQueuesResponse.newBuilder() to construct. + private ClearCompactQueuesResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ClearCompactQueuesResponse() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClearCompactQueuesResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + public final boolean 
isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse) obj; + + boolean result = true; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public 
static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ClearCompactQueuesResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ClearCompactQueuesResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearCompactQueuesResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse(this); + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearCompactQueuesResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ClearCompactQueuesResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ClearCompactQueuesResponse parsePartialFrom( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ClearCompactQueuesResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + /** + * Protobuf service {@code hbase.pb.AdminService} + */ + public static abstract class AdminService + implements org.apache.hadoop.hbase.shaded.com.google.protobuf.Service { + protected AdminService() {} + + public interface Interface { + /** + * rpc GetRegionInfo(.hbase.pb.GetRegionInfoRequest) returns (.hbase.pb.GetRegionInfoResponse); + */ + public abstract void getRegionInfo( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc GetStoreFile(.hbase.pb.GetStoreFileRequest) returns (.hbase.pb.GetStoreFileResponse); + */ + public abstract void getStoreFile( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc GetOnlineRegion(.hbase.pb.GetOnlineRegionRequest) returns (.hbase.pb.GetOnlineRegionResponse); + */ + public abstract void getOnlineRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc OpenRegion(.hbase.pb.OpenRegionRequest) returns (.hbase.pb.OpenRegionResponse); + */ + public abstract void openRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc WarmupRegion(.hbase.pb.WarmupRegionRequest) returns (.hbase.pb.WarmupRegionResponse); + */ + public abstract void warmupRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc CloseRegion(.hbase.pb.CloseRegionRequest) returns (.hbase.pb.CloseRegionResponse); + */ + public abstract void closeRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc CloseRegionForSplitOrMerge(.hbase.pb.CloseRegionForSplitOrMergeRequest) returns (.hbase.pb.CloseRegionForSplitOrMergeResponse); + */ + public abstract void closeRegionForSplitOrMerge( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse); + */ + public abstract void flushRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc SplitRegion(.hbase.pb.SplitRegionRequest) returns (.hbase.pb.SplitRegionResponse); + */ + public abstract void splitRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc CompactRegion(.hbase.pb.CompactRegionRequest) returns (.hbase.pb.CompactRegionResponse); + */ + public abstract void compactRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc ReplicateWALEntry(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse); + */ + public abstract void replicateWALEntry( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc Replay(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse); + */ + public abstract void replay( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc RollWALWriter(.hbase.pb.RollWALWriterRequest) returns (.hbase.pb.RollWALWriterResponse); + */ + public abstract void rollWALWriter( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc GetServerInfo(.hbase.pb.GetServerInfoRequest) returns (.hbase.pb.GetServerInfoResponse); + */ + public abstract void getServerInfo( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc StopServer(.hbase.pb.StopServerRequest) returns (.hbase.pb.StopServerResponse); + */ + public abstract void stopServer( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + * rpc UpdateFavoredNodes(.hbase.pb.UpdateFavoredNodesRequest) returns (.hbase.pb.UpdateFavoredNodesResponse); + */ + public abstract void updateFavoredNodes( 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); @@ -24902,6 +25841,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + * rpc ClearCompactQueues(.hbase.pb.ClearCompactQueuesRequest) returns (.hbase.pb.ClearCompactQueuesResponse); + */ + public abstract void clearCompactQueues( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService( @@ -25051,6 +25998,14 @@ public final class AdminProtos { impl.getRegionLoad(controller, request, done); } + @java.lang.Override + public void clearCompactQueues( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.clearCompactQueues(controller, request, done); + } + }; } @@ -25109,6 +26064,8 @@ public final class AdminProtos { return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); case 17: return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request); + case 18: + return impl.clearCompactQueues(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25159,6 +26116,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25209,6 +26168,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25361,6 +26322,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + * rpc ClearCompactQueues(.hbase.pb.ClearCompactQueuesRequest) returns (.hbase.pb.ClearCompactQueuesResponse); + */ + public abstract void clearCompactQueues( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -25473,6 +26442,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 18: + this.clearCompactQueues(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -25523,6 +26497,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25573,6 +26549,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25863,6 +26841,21 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance())); } + + public void clearCompactQueues( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(18), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -25960,6 +26953,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse clearCompactQueues( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; } private static final class 
BlockingStub implements BlockingInterface { @@ -26184,6 +27182,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse clearCompactQueues( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(18), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.AdminService) @@ -26379,6 +27389,16 @@ public final class AdminProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ClearCompactQueuesRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ClearCompactQueuesRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ClearCompactQueuesResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ClearCompactQueuesResponse_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -26464,48 +27484,52 @@ public final class AdminProtos { "tionResponse\"?\n\024GetRegionLoadRequest\022\'\n\n" + "table_name\030\001 \001(\0132\023.hbase.pb.TableName\"C\n" + "\025GetRegionLoadResponse\022*\n\014region_loads\030\001" + - " \003(\0132\024.hbase.pb.RegionLoad2\203\014\n\014AdminServ" + - "ice\022P\n\rGetRegionInfo\022\036.hbase.pb.GetRegio" + - "nInfoRequest\032\037.hbase.pb.GetRegionInfoRes" + - "ponse\022M\n\014GetStoreFile\022\035.hbase.pb.GetStor", - "eFileRequest\032\036.hbase.pb.GetStoreFileResp" + - "onse\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOn" + - "lineRegionRequest\032!.hbase.pb.GetOnlineRe" + - "gionResponse\022G\n\nOpenRegion\022\033.hbase.pb.Op" + - "enRegionRequest\032\034.hbase.pb.OpenRegionRes" + - "ponse\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupR" + - "egionRequest\032\036.hbase.pb.WarmupRegionResp" + - "onse\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegi" + - "onRequest\032\035.hbase.pb.CloseRegionResponse" + - "\022w\n\032CloseRegionForSplitOrMerge\022+.hbase.p", - "b.CloseRegionForSplitOrMergeRequest\032,.hb" + - "ase.pb.CloseRegionForSplitOrMergeRespons" + - "e\022J\n\013FlushRegion\022\034.hbase.pb.FlushRegionR" + - "equest\032\035.hbase.pb.FlushRegionResponse\022J\n" + - "\013SplitRegion\022\034.hbase.pb.SplitRegionReque" + - "st\032\035.hbase.pb.SplitRegionResponse\022P\n\rCom" + - "pactRegion\022\036.hbase.pb.CompactRegionReque" + - "st\032\037.hbase.pb.CompactRegionResponse\022\\\n\021R" + - "eplicateWALEntry\022\".hbase.pb.ReplicateWAL" + - 
"EntryRequest\032#.hbase.pb.ReplicateWALEntr", - "yResponse\022Q\n\006Replay\022\".hbase.pb.Replicate" + - "WALEntryRequest\032#.hbase.pb.ReplicateWALE" + - "ntryResponse\022P\n\rRollWALWriter\022\036.hbase.pb" + - ".RollWALWriterRequest\032\037.hbase.pb.RollWAL" + - "WriterResponse\022P\n\rGetServerInfo\022\036.hbase." + - "pb.GetServerInfoRequest\032\037.hbase.pb.GetSe" + - "rverInfoResponse\022G\n\nStopServer\022\033.hbase.p" + - "b.StopServerRequest\032\034.hbase.pb.StopServe" + - "rResponse\022_\n\022UpdateFavoredNodes\022#.hbase." + - "pb.UpdateFavoredNodesRequest\032$.hbase.pb.", - "UpdateFavoredNodesResponse\022b\n\023UpdateConf" + - "iguration\022$.hbase.pb.UpdateConfiguration" + - "Request\032%.hbase.pb.UpdateConfigurationRe" + - "sponse\022P\n\rGetRegionLoad\022\036.hbase.pb.GetRe" + - "gionLoadRequest\032\037.hbase.pb.GetRegionLoad" + - "ResponseBH\n1org.apache.hadoop.hbase.shad" + - "ed.protobuf.generatedB\013AdminProtosH\001\210\001\001\240" + - "\001\001" + " \003(\0132\024.hbase.pb.RegionLoad\"/\n\031ClearCompa" + + "ctQueuesRequest\022\022\n\nqueue_name\030\001 \003(\t\"\034\n\032C" + + "learCompactQueuesResponse2\344\014\n\014AdminServi" + + "ce\022P\n\rGetRegionInfo\022\036.hbase.pb.GetRegion", + "InfoRequest\032\037.hbase.pb.GetRegionInfoResp" + + "onse\022M\n\014GetStoreFile\022\035.hbase.pb.GetStore" + + "FileRequest\032\036.hbase.pb.GetStoreFileRespo" + + "nse\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnl" + + "ineRegionRequest\032!.hbase.pb.GetOnlineReg" + + "ionResponse\022G\n\nOpenRegion\022\033.hbase.pb.Ope" + + "nRegionRequest\032\034.hbase.pb.OpenRegionResp" + + "onse\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRe" + + "gionRequest\032\036.hbase.pb.WarmupRegionRespo" + + "nse\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegio", + "nRequest\032\035.hbase.pb.CloseRegionResponse\022" + + "w\n\032CloseRegionForSplitOrMerge\022+.hbase.pb" + + ".CloseRegionForSplitOrMergeRequest\032,.hba" + + "se.pb.CloseRegionForSplitOrMergeResponse" + + "\022J\n\013FlushRegion\022\034.hbase.pb.FlushRegionRe" + + "quest\032\035.hbase.pb.FlushRegionResponse\022J\n\013" + + "SplitRegion\022\034.hbase.pb.SplitRegionReques" + + "t\032\035.hbase.pb.SplitRegionResponse\022P\n\rComp" + + "actRegion\022\036.hbase.pb.CompactRegionReques" + + "t\032\037.hbase.pb.CompactRegionResponse\022\\\n\021Re", + "plicateWALEntry\022\".hbase.pb.ReplicateWALE" + + "ntryRequest\032#.hbase.pb.ReplicateWALEntry" + + "Response\022Q\n\006Replay\022\".hbase.pb.ReplicateW" + + "ALEntryRequest\032#.hbase.pb.ReplicateWALEn" + + "tryResponse\022P\n\rRollWALWriter\022\036.hbase.pb." 
+ + "RollWALWriterRequest\032\037.hbase.pb.RollWALW" + + "riterResponse\022P\n\rGetServerInfo\022\036.hbase.p" + + "b.GetServerInfoRequest\032\037.hbase.pb.GetSer" + + "verInfoResponse\022G\n\nStopServer\022\033.hbase.pb" + + ".StopServerRequest\032\034.hbase.pb.StopServer", + "Response\022_\n\022UpdateFavoredNodes\022#.hbase.p" + + "b.UpdateFavoredNodesRequest\032$.hbase.pb.U" + + "pdateFavoredNodesResponse\022b\n\023UpdateConfi" + + "guration\022$.hbase.pb.UpdateConfigurationR" + + "equest\032%.hbase.pb.UpdateConfigurationRes" + + "ponse\022P\n\rGetRegionLoad\022\036.hbase.pb.GetReg" + + "ionLoadRequest\032\037.hbase.pb.GetRegionLoadR" + + "esponse\022_\n\022ClearCompactQueues\022#.hbase.pb" + + ".ClearCompactQueuesRequest\032$.hbase.pb.Cl" + + "earCompactQueuesResponseBH\n1org.apache.h", + "adoop.hbase.shaded.protobuf.generatedB\013A" + + "dminProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { @@ -26750,6 +27774,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetRegionLoadResponse_descriptor, new java.lang.String[] { "RegionLoads", }); + internal_static_hbase_pb_ClearCompactQueuesRequest_descriptor = + getDescriptor().getMessageTypes().get(36); + internal_static_hbase_pb_ClearCompactQueuesRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_ClearCompactQueuesRequest_descriptor, + new java.lang.String[] { "QueueName", }); + internal_static_hbase_pb_ClearCompactQueuesResponse_descriptor = + getDescriptor().getMessageTypes().get(37); + internal_static_hbase_pb_ClearCompactQueuesResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_ClearCompactQueuesResponse_descriptor, + new java.lang.String[] { }); org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.getDescriptor(); diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto index 338c80b..53c54a6 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto @@ -260,6 +260,13 @@ message GetRegionLoadResponse { repeated RegionLoad region_loads = 1; } +message ClearCompactQueuesRequest { + repeated string queue_name = 1; +} + +message ClearCompactQueuesResponse { +} + service AdminService { rpc GetRegionInfo(GetRegionInfoRequest) returns(GetRegionInfoResponse); @@ -314,4 +321,7 @@ service AdminService { rpc GetRegionLoad(GetRegionLoadRequest) returns(GetRegionLoadResponse); + + rpc ClearCompactQueues(ClearCompactQueuesRequest) + returns(ClearCompactQueuesResponse); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index c8f8be9..ae5c637 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -22420,6 +22420,876 @@ public final class AdminProtos { // @@protoc_insertion_point(class_scope:hbase.pb.UpdateConfigurationResponse) } + public interface ClearQueuesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string queue_name = 1; + /** + * repeated string queue_name = 1; + */ + java.util.List + getQueueNameList(); + /** + * repeated string queue_name = 1; + */ + int getQueueNameCount(); + /** + * repeated string queue_name = 1; + */ + java.lang.String getQueueName(int index); + /** + * repeated string queue_name = 1; + */ + com.google.protobuf.ByteString + getQueueNameBytes(int index); + } + /** + * Protobuf type {@code hbase.pb.ClearQueuesRequest} + */ + public static final class ClearQueuesRequest extends + com.google.protobuf.GeneratedMessage + implements ClearQueuesRequestOrBuilder { + // Use ClearQueuesRequest.newBuilder() to construct. + private ClearQueuesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ClearQueuesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ClearQueuesRequest defaultInstance; + public static ClearQueuesRequest getDefaultInstance() { + return defaultInstance; + } + + public ClearQueuesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClearQueuesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + queueName_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = new com.google.protobuf.UnmodifiableLazyStringList(queueName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ClearQueuesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClearQueuesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string queue_name = 1; + public static final int QUEUE_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList queueName_; + /** + * repeated string queue_name = 1; + */ + public java.util.List + getQueueNameList() { + return queueName_; + } + /** + * repeated string queue_name = 1; + */ + public int getQueueNameCount() { + return queueName_.size(); + } + /** + * repeated string queue_name = 1; + */ + public java.lang.String getQueueName(int index) { + return queueName_.get(index); + } + /** + * repeated string queue_name = 1; + */ + public com.google.protobuf.ByteString + getQueueNameBytes(int index) { + return queueName_.getByteString(index); + } + + private void initFields() { + queueName_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < queueName_.size(); i++) { + output.writeBytes(1, queueName_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < queueName_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(queueName_.getByteString(i)); + } + size += dataSize; + size += 1 * getQueueNameList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest) obj; + + boolean result = true; + result = result && getQueueNameList() + .equals(other.getQueueNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getQueueNameCount() > 0) { + hash = (37 * hash) + 
QUEUE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getQueueNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ClearQueuesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + queueName_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = new com.google.protobuf.UnmodifiableLazyStringList( + queueName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.queueName_ = queueName_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.getDefaultInstance()) return this; + if (!other.queueName_.isEmpty()) { + if (queueName_.isEmpty()) { + queueName_ = other.queueName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureQueueNameIsMutable(); + 
queueName_.addAll(other.queueName_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string queue_name = 1; + private com.google.protobuf.LazyStringList queueName_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureQueueNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + queueName_ = new com.google.protobuf.LazyStringArrayList(queueName_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string queue_name = 1; + */ + public java.util.List + getQueueNameList() { + return java.util.Collections.unmodifiableList(queueName_); + } + /** + * repeated string queue_name = 1; + */ + public int getQueueNameCount() { + return queueName_.size(); + } + /** + * repeated string queue_name = 1; + */ + public java.lang.String getQueueName(int index) { + return queueName_.get(index); + } + /** + * repeated string queue_name = 1; + */ + public com.google.protobuf.ByteString + getQueueNameBytes(int index) { + return queueName_.getByteString(index); + } + /** + * repeated string queue_name = 1; + */ + public Builder setQueueName( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueueNameIsMutable(); + queueName_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder addQueueName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueueNameIsMutable(); + queueName_.add(value); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder addAllQueueName( + java.lang.Iterable values) { + ensureQueueNameIsMutable(); + super.addAll(values, queueName_); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder clearQueueName() { + queueName_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string queue_name = 1; + */ + public Builder addQueueNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueueNameIsMutable(); + queueName_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearQueuesRequest) + } + + static { + defaultInstance = new ClearQueuesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ClearQueuesRequest) + } + + public interface ClearQueuesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ClearQueuesResponse} + */ + public static final class ClearQueuesResponse extends + com.google.protobuf.GeneratedMessage + 
implements ClearQueuesResponseOrBuilder { + // Use ClearQueuesResponse.newBuilder() to construct. + private ClearQueuesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ClearQueuesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ClearQueuesResponse defaultInstance; + public static ClearQueuesResponse getDefaultInstance() { + return defaultInstance; + } + + public ClearQueuesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClearQueuesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ClearQueuesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClearQueuesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += 
getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ClearQueuesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearQueuesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearQueuesResponse) + } + + static { + defaultInstance = new ClearQueuesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ClearQueuesResponse) + } + /** * Protobuf service {@code hbase.pb.AdminService} */ @@ -22564,6 +23434,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ClearQueues(.hbase.pb.ClearQueuesRequest) returns (.hbase.pb.ClearQueuesResponse); + */ + public abstract void clearQueues( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -22705,6 +23583,14 @@ public final class AdminProtos { impl.updateConfiguration(controller, request, done); } + @java.lang.Override + public void clearQueues( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest request, + com.google.protobuf.RpcCallback done) { + impl.clearQueues(controller, request, done); + } + }; } @@ -22761,6 +23647,8 @@ public final class AdminProtos { return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); case 16: return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); + case 17: + return impl.clearQueues(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -22809,6 +23697,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -22857,6 +23747,8 @@ public final class AdminProtos { return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23001,6 +23893,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ClearQueues(.hbase.pb.ClearQueuesRequest) returns (.hbase.pb.ClearQueuesResponse); + */ + public abstract void clearQueues( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -23108,6 +24008,11 @@ public final class AdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 17: + this.clearQueues(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -23156,6 +24061,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23204,6 +24111,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23479,6 +24388,21 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance())); } + + public void clearQueues( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -23571,6 +24495,11 @@ public final class AdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws com.google.protobuf.ServiceException; + + public 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse clearQueues( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -23783,6 +24712,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse clearQueues( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearQueuesResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.AdminService) @@ -23968,6 +24909,16 @@ public final class AdminProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ClearQueuesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ClearQueuesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ClearQueuesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ClearQueuesResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -24051,44 +25002,47 @@ public final class AdminProtos { "B\n\025GetServerInfoResponse\022)\n\013server_info\030" + "\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateCon" + "figurationRequest\"\035\n\033UpdateConfiguration" + - "Response2\207\013\n\014AdminService\022P\n\rGetRegionIn" + - "fo\022\036.hbase.pb.GetRegionInfoRequest\032\037.hba" + - "se.pb.GetRegionInfoResponse\022M\n\014GetStoreF" + - "ile\022\035.hbase.pb.GetStoreFileRequest\032\036.hba" + - "se.pb.GetStoreFileResponse\022V\n\017GetOnlineR" + - "egion\022 .hbase.pb.GetOnlineRegionRequest\032", - "!.hbase.pb.GetOnlineRegionResponse\022G\n\nOp" + - "enRegion\022\033.hbase.pb.OpenRegionRequest\032\034." + - "hbase.pb.OpenRegionResponse\022M\n\014WarmupReg" + - "ion\022\035.hbase.pb.WarmupRegionRequest\032\036.hba" + - "se.pb.WarmupRegionResponse\022J\n\013CloseRegio" + - "n\022\034.hbase.pb.CloseRegionRequest\032\035.hbase." + - "pb.CloseRegionResponse\022J\n\013FlushRegion\022\034." + - "hbase.pb.FlushRegionRequest\032\035.hbase.pb.F" + - "lushRegionResponse\022J\n\013SplitRegion\022\034.hbas" + - "e.pb.SplitRegionRequest\032\035.hbase.pb.Split", - "RegionResponse\022P\n\rCompactRegion\022\036.hbase." 
+ - "pb.CompactRegionRequest\032\037.hbase.pb.Compa" + - "ctRegionResponse\022M\n\014MergeRegions\022\035.hbase" + - ".pb.MergeRegionsRequest\032\036.hbase.pb.Merge" + - "RegionsResponse\022\\\n\021ReplicateWALEntry\022\".h" + - "base.pb.ReplicateWALEntryRequest\032#.hbase" + - ".pb.ReplicateWALEntryResponse\022Q\n\006Replay\022" + - "\".hbase.pb.ReplicateWALEntryRequest\032#.hb" + - "ase.pb.ReplicateWALEntryResponse\022P\n\rRoll" + - "WALWriter\022\036.hbase.pb.RollWALWriterReques", - "t\032\037.hbase.pb.RollWALWriterResponse\022P\n\rGe" + - "tServerInfo\022\036.hbase.pb.GetServerInfoRequ" + - "est\032\037.hbase.pb.GetServerInfoResponse\022G\n\n" + - "StopServer\022\033.hbase.pb.StopServerRequest\032" + - "\034.hbase.pb.StopServerResponse\022_\n\022UpdateF" + - "avoredNodes\022#.hbase.pb.UpdateFavoredNode" + - "sRequest\032$.hbase.pb.UpdateFavoredNodesRe" + - "sponse\022b\n\023UpdateConfiguration\022$.hbase.pb" + - ".UpdateConfigurationRequest\032%.hbase.pb.U" + - "pdateConfigurationResponseBA\n*org.apache", - ".hadoop.hbase.protobuf.generatedB\013AdminP" + - "rotosH\001\210\001\001\240\001\001" + "Response\"(\n\022ClearQueuesRequest\022\022\n\nqueue_" + + "name\030\001 \003(\t\"\025\n\023ClearQueuesResponse2\323\013\n\014Ad" + + "minService\022P\n\rGetRegionInfo\022\036.hbase.pb.G" + + "etRegionInfoRequest\032\037.hbase.pb.GetRegion" + + "InfoResponse\022M\n\014GetStoreFile\022\035.hbase.pb." + + "GetStoreFileRequest\032\036.hbase.pb.GetStoreF", + "ileResponse\022V\n\017GetOnlineRegion\022 .hbase.p" + + "b.GetOnlineRegionRequest\032!.hbase.pb.GetO" + + "nlineRegionResponse\022G\n\nOpenRegion\022\033.hbas" + + "e.pb.OpenRegionRequest\032\034.hbase.pb.OpenRe" + + "gionResponse\022M\n\014WarmupRegion\022\035.hbase.pb." + + "WarmupRegionRequest\032\036.hbase.pb.WarmupReg" + + "ionResponse\022J\n\013CloseRegion\022\034.hbase.pb.Cl" + + "oseRegionRequest\032\035.hbase.pb.CloseRegionR" + + "esponse\022J\n\013FlushRegion\022\034.hbase.pb.FlushR" + + "egionRequest\032\035.hbase.pb.FlushRegionRespo", + "nse\022J\n\013SplitRegion\022\034.hbase.pb.SplitRegio" + + "nRequest\032\035.hbase.pb.SplitRegionResponse\022" + + "P\n\rCompactRegion\022\036.hbase.pb.CompactRegio" + + "nRequest\032\037.hbase.pb.CompactRegionRespons" + + "e\022M\n\014MergeRegions\022\035.hbase.pb.MergeRegion" + + "sRequest\032\036.hbase.pb.MergeRegionsResponse" + + "\022\\\n\021ReplicateWALEntry\022\".hbase.pb.Replica" + + "teWALEntryRequest\032#.hbase.pb.ReplicateWA" + + "LEntryResponse\022Q\n\006Replay\022\".hbase.pb.Repl" + + "icateWALEntryRequest\032#.hbase.pb.Replicat", + "eWALEntryResponse\022P\n\rRollWALWriter\022\036.hba" + + "se.pb.RollWALWriterRequest\032\037.hbase.pb.Ro" + + "llWALWriterResponse\022P\n\rGetServerInfo\022\036.h" + + "base.pb.GetServerInfoRequest\032\037.hbase.pb." 
+ + "GetServerInfoResponse\022G\n\nStopServer\022\033.hb" + + "ase.pb.StopServerRequest\032\034.hbase.pb.Stop" + + "ServerResponse\022_\n\022UpdateFavoredNodes\022#.h" + + "base.pb.UpdateFavoredNodesRequest\032$.hbas" + + "e.pb.UpdateFavoredNodesResponse\022b\n\023Updat" + + "eConfiguration\022$.hbase.pb.UpdateConfigur", + "ationRequest\032%.hbase.pb.UpdateConfigurat" + + "ionResponse\022J\n\013ClearQueues\022\034.hbase.pb.Cl" + + "earQueuesRequest\032\035.hbase.pb.ClearQueuesR" + + "esponseBA\n*org.apache.hadoop.hbase.proto" + + "buf.generatedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -24311,6 +25265,18 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationResponse_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_ClearQueuesRequest_descriptor = + getDescriptor().getMessageTypes().get(34); + internal_static_hbase_pb_ClearQueuesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ClearQueuesRequest_descriptor, + new java.lang.String[] { "QueueName", }); + internal_static_hbase_pb_ClearQueuesResponse_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_hbase_pb_ClearQueuesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ClearQueuesResponse_descriptor, + new java.lang.String[] { }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index 6096966..f8f6fff 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -255,6 +255,13 @@ message UpdateConfigurationRequest { message UpdateConfigurationResponse { } +message ClearCompactQueuesRequest { + repeated string queue_name = 1; +} + +message ClearCompactQueuesResponse { +} + service AdminService { rpc GetRegionInfo(GetRegionInfoRequest) returns(GetRegionInfoResponse); @@ -306,4 +313,7 @@ service AdminService { rpc UpdateConfiguration(UpdateConfigurationRequest) returns(UpdateConfigurationResponse); + + rpc ClearCompactQueues(ClearCompactQueuesRequest) + returns(ClearCompactQueuesResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index eba984a..1e095c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -719,4 +719,12 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi void shutdownLongCompactions(){ this.longCompactions.shutdown(); } + + public void clearLongCompactionsQueue() { + longCompactions.getQueue().clear(); + } + + public void clearShortCompactionsQueue() { + shortCompactions.getQueue().clear(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 8d4ea4d..bcec74b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -118,6 +118,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
@@ -1606,6 +1608,32 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     return builder.build();
   }
 
+  @Override
+  @QosPriority(priority=HConstants.ADMIN_QOS)
+  public ClearCompactQueuesResponse clearCompactQueues(RpcController controller,
+      ClearCompactQueuesRequest request) throws ServiceException {
+    ClearCompactQueuesResponse.Builder respBuilder = ClearCompactQueuesResponse.newBuilder();
+    for (String queueName : request.getQueueNameList()) {
+      LOG.info("Clearing " + ("both".equals(queueName) ? "long and short" : queueName) + " compaction queue(s)");
+      switch (queueName) {
+        case "long":
+          regionServer.compactSplitThread.clearLongCompactionsQueue();
+          break;
+        case "short":
+          regionServer.compactSplitThread.clearShortCompactionsQueue();
+          break;
+        case "both":
+          regionServer.compactSplitThread.clearShortCompactionsQueue();
+          regionServer.compactSplitThread.clearLongCompactionsQueue();
+          break;
+        default:
+          LOG.warn("Unsupported compaction queue name: " + queueName);
+          break;
+      }
+    }
+    return respBuilder.build();
+  }
+
   /**
    * Get some information of the region server.
    *
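The clearLongCompactionsQueue()/clearShortCompactionsQueue() helpers added to CompactSplitThread earlier in this patch simply call getQueue().clear() on the long and short compaction executors, so only compactions still waiting in the queue are discarded; a compaction that is already running continues to completion. A minimal standalone sketch of that ThreadPoolExecutor behavior (plain JDK code, not part of this patch; the class name and timings are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ClearQueueSemantics {
  public static void main(String[] args) throws InterruptedException {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
    for (int i = 0; i < 5; i++) {
      final int id = i;
      pool.execute(new Runnable() {
        @Override
        public void run() {
          try {
            Thread.sleep(500); // stand-in for a long-running compaction
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
          System.out.println("task " + id + " finished");
        }
      });
    }
    // Drops the four tasks still queued; task 0, already running, completes.
    pool.getQueue().clear();
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}

With a single core thread, task 0 is picked up immediately while tasks 1 through 4 wait in the queue and are dropped by clear(), so only "task 0 finished" is printed. This is why the RPC can safely clear queues on a live regionserver without aborting in-flight compactions.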
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 78c8214..a4b39e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactQueuesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
@@ -452,6 +454,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public ClearCompactQueuesResponse clearCompactQueues(RpcController controller,
+      ClearCompactQueuesRequest request) throws ServiceException {
+    return null;
+  }
+
+  @Override
   public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request)
       throws ServiceException {
     // TODO Auto-generated method stub
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 6aaa130..aeefb52 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1219,5 +1219,28 @@ module Hbase
     set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
     set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
   end
+
+  #----------------------------------------------------------------------------------------------
+  # Clear compaction queues on a regionserver
+  def clear_compact_queues_rs(server_name, queue_name)
+    names = ['long', 'short', 'both']
+    queues = java.util.HashSet.new
+    if queue_name.kind_of?(String)
+      unless names.include?(queue_name)
+        raise(ArgumentError, "Unsupported queue name [ " + queue_name + " ]")
+      end
+      queues.add(queue_name)
+    elsif queue_name.kind_of?(Array)
+      queue_name.each do |s|
+        unless names.include?(s)
+          raise(ArgumentError, "Unsupported queue name [ " + s + " ]")
+        end
+        queues.add(s)
+      end
+    else
+      raise(ArgumentError, "queue_name must be a String or an Array, got #{queue_name}")
+    end
+    @admin.clearCompactQueues(ServerName.valueOf(server_name), queues)
+  end
 end
end
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 66480f9..6f3985f 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -356,6 +356,7 @@ Shell.load_command_group(
     trace
     splitormerge_switch
     splitormerge_enabled
+    clear_compact_queues_rs
   ],
   # TODO remove older hlog_roll command
   :aliases => {
diff --git a/hbase-shell/src/main/ruby/shell/commands/clear_compact_queues_rs.rb b/hbase-shell/src/main/ruby/shell/commands/clear_compact_queues_rs.rb
new file mode 100644
index 0000000..ca2ca2e
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/clear_compact_queues_rs.rb
@@ -0,0 +1,41 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class ClearCompactQueuesRs < Command
+      def help
+        return <<-EOF
+          Clear compaction queues on a regionserver.
+          The queue_name argument may be 'short', 'long', or 'both' (default 'both'):
+          'short' clears the shortCompactions queue, 'long' the longCompactions queue.
+
+          Examples:
+          hbase> clear_compact_queues_rs 'host187.example.com,60020'
+          hbase> clear_compact_queues_rs 'host187.example.com,60020', 'long'
+          hbase> clear_compact_queues_rs 'host187.example.com,60020', ['long', 'short']
+          EOF
+      end
+
+      def command(server_name, queue_name = 'both')
+        admin.clear_compact_queues_rs(server_name, queue_name)
+      end
+    end
+  end
+end
\ No newline at end of file
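End to end, the patch wires the shell command through Admin#clearCompactQueues down to the regionserver RPC handler. A minimal client-side sketch of the new Java API, assuming a reachable cluster; the class name, hostname, port, and startcode below are placeholders, not values from this patch:

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClearCompactQueuesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Accepted names mirror the shell command: "long", "short", or "both".
      Set<String> queues = new HashSet<String>();
      queues.add("both");
      // ServerName.valueOf expects "hostname,port,startcode"; placeholder values here.
      ServerName sn = ServerName.valueOf("host187.example.com,60020,1475728912345");
      admin.clearCompactQueues(sn, queues);
    }
  }
}

The shell path is equivalent: clear_compact_queues_rs validates the queue names, collects them into a java.util.HashSet, and invokes the same Admin method against the server resolved by ServerName.valueOf.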