Index: hbase-protocol/src/main/protobuf/Client.proto
===================================================================
--- hbase-protocol/src/main/protobuf/Client.proto	(revision 23090)
+++ hbase-protocol/src/main/protobuf/Client.proto	(revision 23094)
@@ -389,4 +389,22 @@
   rpc Multi(MultiRequest)
     returns(MultiResponse);
+
+  rpc ExecBatchService(BatchCoprocessorServiceRequest)
+    returns(BatchCoprocessorServiceResponse);
 }
+
+message BatchCoprocessorServiceCall {
+  required RegionSpecifier region = 1;
+  required CoprocessorServiceCall call = 2;
+}
+
+message BatchCoprocessorServiceRequest {
+  required string server_callback_name = 1;
+  optional bytes server_callback_initData = 2;
+  repeated BatchCoprocessorServiceCall call = 3;
+}
+
+message BatchCoprocessorServiceResponse {
+  required NameBytesPair value = 1;
+}
\ No newline at end of file
Index: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
===================================================================
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java	(revision 23090)
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java	(revision 23094)
@@ -28315,7 +28315,2336 @@
     // @@protoc_insertion_point(class_scope:MultiResponse)
   }
+  public interface BatchCoprocessorServiceCallOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .RegionSpecifier region = 1;
+    /**
+     * required .RegionSpecifier region = 1;
+     */
+    boolean hasRegion();
+    /**
+     * required .RegionSpecifier region = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
+    /**
+     * required .RegionSpecifier region = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
+
+    // required .CoprocessorServiceCall call = 2;
+    /**
+     * required .CoprocessorServiceCall call = 2;
+     */
+    boolean hasCall();
+    /**
+     * required .CoprocessorServiceCall call = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall getCall();
+    /**
+     * required .CoprocessorServiceCall call = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCallOrBuilder getCallOrBuilder();
+  }
+  /**
+   * Protobuf type {@code BatchCoprocessorServiceCall}
+   */
+  public static final class BatchCoprocessorServiceCall extends
+      com.google.protobuf.GeneratedMessage
+      implements BatchCoprocessorServiceCallOrBuilder {
+    // Use BatchCoprocessorServiceCall.newBuilder() to construct.
+ private BatchCoprocessorServiceCall(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BatchCoprocessorServiceCall(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BatchCoprocessorServiceCall defaultInstance; + public static BatchCoprocessorServiceCall getDefaultInstance() { + return defaultInstance; + } + + public BatchCoprocessorServiceCall getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BatchCoprocessorServiceCall( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = call_.toBuilder(); + } + call_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(call_); + call_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceCall_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceCall_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BatchCoprocessorServiceCall parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BatchCoprocessorServiceCall(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .RegionSpecifier region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_; + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_; + } + + // required .CoprocessorServiceCall call = 2; + public static final int CALL_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall call_; + /** + * required .CoprocessorServiceCall call = 2; + */ + public boolean hasCall() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall getCall() { + return call_; + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCallOrBuilder getCallOrBuilder() { + return call_; + } + + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + call_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCall()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getCall().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, call_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, call_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object 
writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && (hasCall() == other.hasCall()); + if (hasCall()) { + result = result && getCall() + .equals(other.getCall()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + if (hasCall()) { + hash = (37 * hash) + CALL_FIELD_NUMBER; + hash = (53 * hash) + getCall().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code BatchCoprocessorServiceCall} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCallOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceCall_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceCall_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + getCallFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (callBuilder_ == null) { + call_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.getDefaultInstance(); + } else { + callBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceCall_descriptor; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall build() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (callBuilder_ == null) { + result.call_ = call_; + } else { + result.call_ = callBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + if (other.hasCall()) { + mergeCall(other.getCall()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + + return false; + } + if (!hasCall()) { + + return false; + } + if (!getRegion().isInitialized()) { + + return false; + } + if (!getCall().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .RegionSpecifier region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + /** + * required .RegionSpecifier region = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + 
region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // required .CoprocessorServiceCall call = 2; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall call_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCallOrBuilder> callBuilder_; + /** + * required .CoprocessorServiceCall call = 2; + */ + public boolean hasCall() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall getCall() { + if (callBuilder_ == null) { + return call_; + } else { + return callBuilder_.getMessage(); + } + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public Builder setCall(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall value) { + if (callBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + call_ = value; + onChanged(); + } else { + callBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public Builder setCall( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder builderForValue) { + if (callBuilder_ == null) { + call_ = builderForValue.build(); + onChanged(); + } else { + callBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public Builder mergeCall(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall value) { + if (callBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + call_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.getDefaultInstance()) { + call_ = + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.newBuilder(call_).mergeFrom(value).buildPartial(); + } else { + call_ = value; + } + onChanged(); + } else { + callBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public Builder clearCall() { + if (callBuilder_ == null) { + call_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.getDefaultInstance(); + onChanged(); + } else { + callBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder getCallBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getCallFieldBuilder().getBuilder(); + } + /** + * required .CoprocessorServiceCall call = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCallOrBuilder getCallOrBuilder() { + if (callBuilder_ != null) { + return callBuilder_.getMessageOrBuilder(); + } else { + return call_; + } + } + /** + * required .CoprocessorServiceCall call = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCallOrBuilder> + getCallFieldBuilder() { + if (callBuilder_ == null) { + callBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCallOrBuilder>( + call_, + getParentForChildren(), + isClean()); + call_ = null; + } + return callBuilder_; + } + + // @@protoc_insertion_point(builder_scope:BatchCoprocessorServiceCall) + } + + static { + defaultInstance = new BatchCoprocessorServiceCall(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BatchCoprocessorServiceCall) + } + + public interface BatchCoprocessorServiceRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string server_callback_name = 1; + /** + * required string server_callback_name = 1; + */ + boolean hasServerCallbackName(); + /** + * required string server_callback_name = 1; + */ + java.lang.String getServerCallbackName(); + /** + * required string server_callback_name = 1; + */ + com.google.protobuf.ByteString + getServerCallbackNameBytes(); + + // optional bytes server_callback_initData = 2; + /** + * optional bytes server_callback_initData = 2; + */ + boolean hasServerCallbackInitData(); + /** + * optional bytes server_callback_initData = 2; + */ + com.google.protobuf.ByteString getServerCallbackInitData(); + + // repeated .BatchCoprocessorServiceCall call = 3; + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + java.util.List + getCallList(); + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall getCall(int index); + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + int getCallCount(); + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + java.util.List + getCallOrBuilderList(); + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCallOrBuilder getCallOrBuilder( + int index); + } + /** + * Protobuf type {@code BatchCoprocessorServiceRequest} + */ + public static final class BatchCoprocessorServiceRequest extends + com.google.protobuf.GeneratedMessage + implements BatchCoprocessorServiceRequestOrBuilder { + // Use BatchCoprocessorServiceRequest.newBuilder() to construct. 
+ private BatchCoprocessorServiceRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BatchCoprocessorServiceRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BatchCoprocessorServiceRequest defaultInstance; + public static BatchCoprocessorServiceRequest getDefaultInstance() { + return defaultInstance; + } + + public BatchCoprocessorServiceRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BatchCoprocessorServiceRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + serverCallbackName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + serverCallbackInitData_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + call_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + call_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + call_ = java.util.Collections.unmodifiableList(call_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BatchCoprocessorServiceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BatchCoprocessorServiceRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string server_callback_name = 1; + public static final int SERVER_CALLBACK_NAME_FIELD_NUMBER = 1; + private java.lang.Object serverCallbackName_; + /** + * required string server_callback_name = 1; + */ + public boolean hasServerCallbackName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server_callback_name = 1; + */ + public java.lang.String getServerCallbackName() { + java.lang.Object ref = serverCallbackName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + serverCallbackName_ = s; + } + return s; + } + } + /** + * required string server_callback_name = 1; + */ + public com.google.protobuf.ByteString + getServerCallbackNameBytes() { + java.lang.Object ref = serverCallbackName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serverCallbackName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bytes server_callback_initData = 2; + public static final int SERVER_CALLBACK_INITDATA_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString serverCallbackInitData_; + /** + * optional bytes server_callback_initData = 2; + */ + public boolean hasServerCallbackInitData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bytes server_callback_initData = 2; + */ + public com.google.protobuf.ByteString getServerCallbackInitData() { + return serverCallbackInitData_; + } + + // repeated .BatchCoprocessorServiceCall call = 3; + public static final int CALL_FIELD_NUMBER = 3; + private java.util.List call_; + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public java.util.List getCallList() { + return call_; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public java.util.List + getCallOrBuilderList() { + return call_; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public int getCallCount() { + return call_.size(); + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall getCall(int index) { + return call_.get(index); + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCallOrBuilder getCallOrBuilder( + int index) { + return call_.get(index); + } + + private void initFields() { + serverCallbackName_ = ""; + serverCallbackInitData_ = com.google.protobuf.ByteString.EMPTY; + call_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServerCallbackName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getCallCount(); i++) { + if (!getCall(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, 
getServerCallbackNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, serverCallbackInitData_); + } + for (int i = 0; i < call_.size(); i++) { + output.writeMessage(3, call_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getServerCallbackNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, serverCallbackInitData_); + } + for (int i = 0; i < call_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, call_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest) obj; + + boolean result = true; + result = result && (hasServerCallbackName() == other.hasServerCallbackName()); + if (hasServerCallbackName()) { + result = result && getServerCallbackName() + .equals(other.getServerCallbackName()); + } + result = result && (hasServerCallbackInitData() == other.hasServerCallbackInitData()); + if (hasServerCallbackInitData()) { + result = result && getServerCallbackInitData() + .equals(other.getServerCallbackInitData()); + } + result = result && getCallList() + .equals(other.getCallList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServerCallbackName()) { + hash = (37 * hash) + SERVER_CALLBACK_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerCallbackName().hashCode(); + } + if (hasServerCallbackInitData()) { + hash = (37 * hash) + SERVER_CALLBACK_INITDATA_FIELD_NUMBER; + hash = (53 * hash) + getServerCallbackInitData().hashCode(); + } + if (getCallCount() > 0) { + hash = (37 * hash) + CALL_FIELD_NUMBER; + hash = (53 * hash) + getCallList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code BatchCoprocessorServiceRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCallFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + serverCallbackName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + serverCallbackInitData_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + if (callBuilder_ == null) { + call_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + callBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest build() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.serverCallbackName_ = serverCallbackName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.serverCallbackInitData_ = serverCallbackInitData_; + if (callBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + call_ = java.util.Collections.unmodifiableList(call_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.call_ = call_; + } else { + result.call_ = callBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.getDefaultInstance()) return this; + if (other.hasServerCallbackName()) { + bitField0_ |= 0x00000001; + serverCallbackName_ = other.serverCallbackName_; + onChanged(); + } + if (other.hasServerCallbackInitData()) { + setServerCallbackInitData(other.getServerCallbackInitData()); + } + if (callBuilder_ == null) { + if (!other.call_.isEmpty()) { + if (call_.isEmpty()) { + call_ = other.call_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureCallIsMutable(); + call_.addAll(other.call_); + } + onChanged(); + } + } else { + if (!other.call_.isEmpty()) { + if (callBuilder_.isEmpty()) { + callBuilder_.dispose(); + callBuilder_ = null; + call_ = other.call_; + bitField0_ = (bitField0_ & ~0x00000004); + callBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getCallFieldBuilder() : null; + } else { + callBuilder_.addAllMessages(other.call_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServerCallbackName()) { + + return false; + } + for (int i = 0; i < getCallCount(); i++) { + if (!getCall(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string server_callback_name = 1; + private java.lang.Object serverCallbackName_ = ""; + /** + * required string server_callback_name = 1; + */ + public boolean hasServerCallbackName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server_callback_name = 1; + */ + public java.lang.String getServerCallbackName() { + java.lang.Object ref = serverCallbackName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + serverCallbackName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string server_callback_name = 1; + */ + public com.google.protobuf.ByteString + getServerCallbackNameBytes() { + java.lang.Object ref = serverCallbackName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serverCallbackName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string server_callback_name = 1; + */ + public Builder setServerCallbackName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + serverCallbackName_ = value; + onChanged(); + return this; + } + /** + * required string server_callback_name = 1; + */ + public Builder clearServerCallbackName() { + bitField0_ = (bitField0_ & ~0x00000001); + serverCallbackName_ = getDefaultInstance().getServerCallbackName(); + onChanged(); + return this; + 
} + /** + * required string server_callback_name = 1; + */ + public Builder setServerCallbackNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + serverCallbackName_ = value; + onChanged(); + return this; + } + + // optional bytes server_callback_initData = 2; + private com.google.protobuf.ByteString serverCallbackInitData_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes server_callback_initData = 2; + */ + public boolean hasServerCallbackInitData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bytes server_callback_initData = 2; + */ + public com.google.protobuf.ByteString getServerCallbackInitData() { + return serverCallbackInitData_; + } + /** + * optional bytes server_callback_initData = 2; + */ + public Builder setServerCallbackInitData(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + serverCallbackInitData_ = value; + onChanged(); + return this; + } + /** + * optional bytes server_callback_initData = 2; + */ + public Builder clearServerCallbackInitData() { + bitField0_ = (bitField0_ & ~0x00000002); + serverCallbackInitData_ = getDefaultInstance().getServerCallbackInitData(); + onChanged(); + return this; + } + + // repeated .BatchCoprocessorServiceCall call = 3; + private java.util.List call_ = + java.util.Collections.emptyList(); + private void ensureCallIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + call_ = new java.util.ArrayList(call_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCallOrBuilder> callBuilder_; + + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public java.util.List getCallList() { + if (callBuilder_ == null) { + return java.util.Collections.unmodifiableList(call_); + } else { + return callBuilder_.getMessageList(); + } + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public int getCallCount() { + if (callBuilder_ == null) { + return call_.size(); + } else { + return callBuilder_.getCount(); + } + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall getCall(int index) { + if (callBuilder_ == null) { + return call_.get(index); + } else { + return callBuilder_.getMessage(index); + } + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder setCall( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall value) { + if (callBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCallIsMutable(); + call_.set(index, value); + onChanged(); + } else { + callBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder setCall( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder builderForValue) { + if (callBuilder_ == null) { + ensureCallIsMutable(); + call_.set(index, builderForValue.build()); + onChanged(); + } else { + callBuilder_.setMessage(index, 
builderForValue.build()); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder addCall(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall value) { + if (callBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCallIsMutable(); + call_.add(value); + onChanged(); + } else { + callBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder addCall( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall value) { + if (callBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCallIsMutable(); + call_.add(index, value); + onChanged(); + } else { + callBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder addCall( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder builderForValue) { + if (callBuilder_ == null) { + ensureCallIsMutable(); + call_.add(builderForValue.build()); + onChanged(); + } else { + callBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder addCall( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder builderForValue) { + if (callBuilder_ == null) { + ensureCallIsMutable(); + call_.add(index, builderForValue.build()); + onChanged(); + } else { + callBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder addAllCall( + java.lang.Iterable values) { + if (callBuilder_ == null) { + ensureCallIsMutable(); + super.addAll(values, call_); + onChanged(); + } else { + callBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder clearCall() { + if (callBuilder_ == null) { + call_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + callBuilder_.clear(); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public Builder removeCall(int index) { + if (callBuilder_ == null) { + ensureCallIsMutable(); + call_.remove(index); + onChanged(); + } else { + callBuilder_.remove(index); + } + return this; + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder getCallBuilder( + int index) { + return getCallFieldBuilder().getBuilder(index); + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCallOrBuilder getCallOrBuilder( + int index) { + if (callBuilder_ == null) { + return call_.get(index); } else { + return callBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public java.util.List + getCallOrBuilderList() { + if (callBuilder_ != null) { + return callBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(call_); + } + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder 
addCallBuilder() { + return getCallFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.getDefaultInstance()); + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder addCallBuilder( + int index) { + return getCallFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.getDefaultInstance()); + } + /** + * repeated .BatchCoprocessorServiceCall call = 3; + */ + public java.util.List + getCallBuilderList() { + return getCallFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCallOrBuilder> + getCallFieldBuilder() { + if (callBuilder_ == null) { + callBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCallOrBuilder>( + call_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + call_ = null; + } + return callBuilder_; + } + + // @@protoc_insertion_point(builder_scope:BatchCoprocessorServiceRequest) + } + + static { + defaultInstance = new BatchCoprocessorServiceRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BatchCoprocessorServiceRequest) + } + + public interface BatchCoprocessorServiceResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NameBytesPair value = 1; + /** + * required .NameBytesPair value = 1; + */ + boolean hasValue(); + /** + * required .NameBytesPair value = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getValue(); + /** + * required .NameBytesPair value = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder getValueOrBuilder(); + } + /** + * Protobuf type {@code BatchCoprocessorServiceResponse} + */ + public static final class BatchCoprocessorServiceResponse extends + com.google.protobuf.GeneratedMessage + implements BatchCoprocessorServiceResponseOrBuilder { + // Use BatchCoprocessorServiceResponse.newBuilder() to construct. 
+ private BatchCoprocessorServiceResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BatchCoprocessorServiceResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BatchCoprocessorServiceResponse defaultInstance; + public static BatchCoprocessorServiceResponse getDefaultInstance() { + return defaultInstance; + } + + public BatchCoprocessorServiceResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BatchCoprocessorServiceResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = value_.toBuilder(); + } + value_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(value_); + value_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BatchCoprocessorServiceResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BatchCoprocessorServiceResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .NameBytesPair value = 1; + public static final int 
VALUE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair value_; + /** + * required .NameBytesPair value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .NameBytesPair value = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getValue() { + return value_; + } + /** + * required .NameBytesPair value = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder getValueOrBuilder() { + return value_; + } + + private void initFields() { + value_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + if (!getValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse) obj; + + boolean result = true; + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && getValue() + .equals(other.getValue()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code BatchCoprocessorServiceResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getValueFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (valueBuilder_ == null) { + value_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_BatchCoprocessorServiceResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse build() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (valueBuilder_ == null) { + result.value_ = value_; + } else { + result.value_ = valueBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.getDefaultInstance()) return this; + if (other.hasValue()) { + mergeValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasValue()) { + + return false; + } 
+ if (!getValue().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .NameBytesPair value = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair value_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder> valueBuilder_; + /** + * required .NameBytesPair value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .NameBytesPair value = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getValue() { + if (valueBuilder_ == null) { + return value_; + } else { + return valueBuilder_.getMessage(); + } + } + /** + * required .NameBytesPair value = 1; + */ + public Builder setValue(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + onChanged(); + } else { + valueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .NameBytesPair value = 1; + */ + public Builder setValue( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder builderForValue) { + if (valueBuilder_ == null) { + value_ = builderForValue.build(); + onChanged(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .NameBytesPair value = 1; + */ + public Builder mergeValue(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair value) { + if (valueBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + value_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance()) { + value_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.newBuilder(value_).mergeFrom(value).buildPartial(); + } else { + value_ = value; + } + onChanged(); + } else { + valueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .NameBytesPair value = 1; + */ + public Builder clearValue() { + if (valueBuilder_ == null) { + value_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); + onChanged(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .NameBytesPair value = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder getValueBuilder() { + bitField0_ |= 
0x00000001; + onChanged(); + return getValueFieldBuilder().getBuilder(); + } + /** + * required .NameBytesPair value = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder getValueOrBuilder() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilder(); + } else { + return value_; + } + } + /** + * required .NameBytesPair value = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder> + getValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder>( + value_, + getParentForChildren(), + isClean()); + value_ = null; + } + return valueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:BatchCoprocessorServiceResponse) + } + + static { + defaultInstance = new BatchCoprocessorServiceResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BatchCoprocessorServiceResponse) + } + + /** * Protobuf service {@code ClientService} */ public static abstract class ClientService @@ -28371,6 +30700,14 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ExecBatchService(.BatchCoprocessorServiceRequest) returns (.BatchCoprocessorServiceResponse); + */ + public abstract void execBatchService( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -28424,6 +30761,14 @@ impl.multi(controller, request, done); } + @java.lang.Override + public void execBatchService( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest request, + com.google.protobuf.RpcCallback done) { + impl.execBatchService(controller, request, done); + } + }; } @@ -28458,6 +30803,8 @@ return impl.execService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); case 5: return impl.multi(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest)request); + case 6: + return impl.execBatchService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -28484,6 +30831,8 @@ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 5: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -28510,6 +30859,8 @@ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 5: return 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -28566,6 +30917,14 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ExecBatchService(.BatchCoprocessorServiceRequest) returns (.BatchCoprocessorServiceResponse); + */ + public abstract void execBatchService( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -28618,6 +30977,11 @@ com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 6: + this.execBatchService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -28644,6 +31008,8 @@ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 5: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -28670,6 +31036,8 @@ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 5: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -28780,6 +31148,21 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance())); } + + public void execBatchService( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -28817,6 +31200,11 @@ com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse execBatchService( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest request) + throws com.google.protobuf.ServiceException; } 
private static final class BlockingStub implements BlockingInterface { @@ -28897,6 +31285,18 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse execBatchService( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:ClientService) @@ -29042,6 +31442,21 @@ private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_MultiResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BatchCoprocessorServiceCall_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BatchCoprocessorServiceCall_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BatchCoprocessorServiceRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BatchCoprocessorServiceRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BatchCoprocessorServiceResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BatchCoprocessorServiceResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -29139,14 +31554,24 @@ "#\n\014regionAction\030\001 \003(\0132\r.RegionAction\022\022\n\n" + "nonceGroup\030\002 \001(\004\"@\n\rMultiResponse\022/\n\022reg" + "ionActionResult\030\001 \003(\0132\023.RegionActionResu" + - "lt2\261\002\n\rClientService\022 \n\003Get\022\013.GetRequest" + - "\032\014.GetResponse\022)\n\006Mutate\022\016.MutateRequest", - "\032\017.MutateResponse\022#\n\004Scan\022\014.ScanRequest\032" + - "\r.ScanResponse\022>\n\rBulkLoadHFile\022\025.BulkLo" + - "adHFileRequest\032\026.BulkLoadHFileResponse\022F" + - "\n\013ExecService\022\032.CoprocessorServiceReques" + - "t\032\033.CoprocessorServiceResponse\022&\n\005Multi\022" + - "\r.MultiRequest\032\016.MultiResponseBB\n*org.ap" + + "lt\"f\n\033BatchCoprocessorServiceCall\022 \n\006reg" + + "ion\030\001 \002(\0132\020.RegionSpecifier\022%\n\004call\030\002 \002(", + "\0132\027.CoprocessorServiceCall\"\214\001\n\036BatchCopr" + + "ocessorServiceRequest\022\034\n\024server_callback" + + "_name\030\001 \002(\t\022 \n\030server_callback_initData\030" + + "\002 \001(\014\022*\n\004call\030\003 \003(\0132\034.BatchCoprocessorSe" + + "rviceCall\"@\n\037BatchCoprocessorServiceResp" + + "onse\022\035\n\005value\030\001 \002(\0132\016.NameBytesPair2\210\003\n\r" + + "ClientService\022 \n\003Get\022\013.GetRequest\032\014.GetR" + + "esponse\022)\n\006Mutate\022\016.MutateRequest\032\017.Muta" + + "teResponse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanR" + + "esponse\022>\n\rBulkLoadHFile\022\025.BulkLoadHFile", + "Request\032\026.BulkLoadHFileResponse\022F\n\013ExecS" + + "ervice\022\032.CoprocessorServiceRequest\032\033.Cop" + + 
"rocessorServiceResponse\022&\n\005Multi\022\r.Multi" + + "Request\032\016.MultiResponse\022U\n\020ExecBatchServ" + + "ice\022\037.BatchCoprocessorServiceRequest\032 .B" + + "atchCoprocessorServiceResponseBB\n*org.ap" + "ache.hadoop.hbase.protobuf.generatedB\014Cl" + "ientProtosH\001\210\001\001\240\001\001" }; @@ -29323,6 +31748,24 @@ com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiResponse_descriptor, new java.lang.String[] { "RegionActionResult", }); + internal_static_BatchCoprocessorServiceCall_descriptor = + getDescriptor().getMessageTypes().get(25); + internal_static_BatchCoprocessorServiceCall_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BatchCoprocessorServiceCall_descriptor, + new java.lang.String[] { "Region", "Call", }); + internal_static_BatchCoprocessorServiceRequest_descriptor = + getDescriptor().getMessageTypes().get(26); + internal_static_BatchCoprocessorServiceRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BatchCoprocessorServiceRequest_descriptor, + new java.lang.String[] { "ServerCallbackName", "ServerCallbackInitData", "Call", }); + internal_static_BatchCoprocessorServiceResponse_descriptor = + getDescriptor().getMessageTypes().get(27); + internal_static_BatchCoprocessorServiceResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BatchCoprocessorServiceResponse_descriptor, + new java.lang.String[] { "Value", }); return null; } }; Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java (revision 23090) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java (revision 23094) @@ -75,6 +75,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; @@ -560,4 +562,10 @@ public ServerNonceManager getNonceManager() { return null; } + + @Override + public BatchCoprocessorServiceResponse execBatchService(RpcController controller, + BatchCoprocessorServiceRequest request) throws ServiceException { + return null; + } } Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java (revision 23090) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java (revision 23094) @@ -25,10 +25,16 @@ import static org.junit.Assert.fail; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; +import 
java.util.concurrent.ExecutorService; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -41,10 +47,12 @@ import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.coprocessor.BatchCoprocessorServiceClient; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.coprocessor.batch.ServerCallback; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; @@ -52,11 +60,15 @@ import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import com.google.protobuf.ZeroCopyLiteralByteString; @@ -324,6 +336,154 @@ } } + @Test + public void testAggregationInBatchCoprocessorServicee() throws Throwable { + HTable table = new HTable(util.getConfiguration(), TEST_TABLE); + final Map results = Collections.synchronizedMap(new TreeMap()); + ExecutorService pool = new ThreadPoolExecutor(1, 10, 60, TimeUnit.SECONDS, + new SynchronousQueue(), Threads.newDaemonThreadFactory("htable")); + ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); + List> keyRanges = new ArrayList>(); + keyRanges.add(new Pair(ROWS[0], ROWS[ROWS.length - 1])); + BatchCoprocessorServiceClient.batchCoprocessorService( + ColumnAggregationProtos.ColumnAggregationService.class, table, pool, keyRanges, + new Batch.Call() { + @Override + public Long call(ColumnAggregationProtos.ColumnAggregationService instance) + throws IOException { + BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); + ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest + .newBuilder(); + builder.setFamily(ZeroCopyLiteralByteString.wrap(TEST_FAMILY)); + if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { + builder.setQualifier(ZeroCopyLiteralByteString.wrap(TEST_QUALIFIER)); + } + instance.sum(null, builder.build(), rpcCallback); + return rpcCallback.get().getSum(); + } + }, MultiplyAndMerge.class.getName(), Bytes.toBytes(2), new Batch.BatchCallback() { + + @Override + public void update(ServerName serverName, Long result) { + results.put(serverName, result); + } + + }); + int sumResult = 0; + int expectedResult = 0; + for (Map.Entry e : results.entrySet()) { + LOG.info("Got value " + e.getValue() + " for server " + e.getKey()); + sumResult += e.getValue(); + } + for (int i = 0; i < ROWSIZE; i++) { + expectedResult += i; + } + assertEquals("Invalid result", expectedResult * 2, sumResult); + + results.clear(); + keyRanges.clear(); + + // scan: for region 2 and region 3 + keyRanges.add(new Pair(ROWS[rowSeperator1], ROWS[ROWS.length 
- 1])); + BatchCoprocessorServiceClient.batchCoprocessorService( + ColumnAggregationProtos.ColumnAggregationService.class, table, pool, keyRanges, + new Batch.Call() { + @Override + public Long call(ColumnAggregationProtos.ColumnAggregationService instance) + throws IOException { + BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); + ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest + .newBuilder(); + builder.setFamily(ZeroCopyLiteralByteString.wrap(TEST_FAMILY)); + if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { + builder.setQualifier(ZeroCopyLiteralByteString.wrap(TEST_QUALIFIER)); + } + instance.sum(null, builder.build(), rpcCallback); + return rpcCallback.get().getSum(); + } + }, MultiplyAndMerge.class.getName(), Bytes.toBytes(2), new Batch.BatchCallback() { + + @Override + public void update(ServerName serverName, Long result) { + results.put(serverName, result); + } + + }); + sumResult = 0; + expectedResult = 0; + for (Map.Entry e : results.entrySet()) { + LOG.info("Got value " + e.getValue() + " for server " + e.getKey()); + sumResult += e.getValue(); + } + for (int i = rowSeperator1; i < ROWSIZE; i++) { + expectedResult += i; + } + assertEquals("Invalid result", expectedResult * 2, sumResult); + pool.shutdown(); + table.close(); + } + + @Test + public void testAggregationInBatchCoprocessorServiceWithReturnValue() throws Throwable { + HTable table = new HTable(util.getConfiguration(), TEST_TABLE); + Map results = sumInBatchCoprocessorServer(table, TEST_FAMILY, TEST_QUALIFIER, + ROWS[0], ROWS[ROWS.length - 1]); + int sumResult = 0; + int expectedResult = 0; + for (Map.Entry e : results.entrySet()) { + LOG.info("Got value " + e.getValue() + " for server " + e.getKey()); + sumResult += e.getValue(); + } + for (int i = 0; i < ROWSIZE; i++) { + expectedResult += i; + } + assertEquals("Invalid result", expectedResult * 2, sumResult); + + results.clear(); + + // scan: for region 2 and region 3 + results = sumInBatchCoprocessorServer(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], + ROWS[ROWS.length - 1]); + sumResult = 0; + expectedResult = 0; + for (Map.Entry e : results.entrySet()) { + LOG.info("Got value " + e.getValue() + " for server " + e.getKey()); + sumResult += e.getValue(); + } + for (int i = rowSeperator1; i < ROWSIZE; i++) { + expectedResult += i; + } + assertEquals("Invalid result", expectedResult * 2, sumResult); + table.close(); + } + + public static class MultiplyAndMerge implements ServerCallback { + + protected int multiply = 1; + long result = 0; + + @Override + public void update(byte[] region, Message message) throws IOException { + synchronized (this) { + org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumResponse response = (org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumResponse) message; + long r = response.getSum(); + result += r * multiply; + } + } + + @Override + public void init(ByteString initData) { + multiply = Bytes.toInt(initData.toByteArray()); + } + + @Override + public Message getResult() { + return org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumResponse + .newBuilder().setSum(this.result).build(); + } + + } + private static byte[][] makeN(byte[] base, int n) { byte[][] ret = new byte[n][]; for (int i = 0; i < n; i++) { @@ -332,5 +492,34 @@ return ret; } + private Map sumInBatchCoprocessorServer(final HTable table, + final byte[] family, final byte[] qualifier, final byte[] start, final 
byte[] end) + throws IOException, Throwable { + ExecutorService pool = new ThreadPoolExecutor(1, 10, 60, TimeUnit.SECONDS, + new SynchronousQueue(), Threads.newDaemonThreadFactory("htable")); + ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); + List> keyRanges = new ArrayList>(); + keyRanges.add(new Pair(start, end)); + Map result = BatchCoprocessorServiceClient.batchCoprocessorService( + ColumnAggregationProtos.ColumnAggregationService.class, table, pool, keyRanges, + new Batch.Call() { + @Override + public Long call(ColumnAggregationProtos.ColumnAggregationService instance) + throws IOException { + BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); + ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest + .newBuilder(); + builder.setFamily(ZeroCopyLiteralByteString.wrap(family)); + if (qualifier != null && qualifier.length > 0) { + builder.setQualifier(ZeroCopyLiteralByteString.wrap(qualifier)); + } + instance.sum(null, builder.build(), rpcCallback); + return rpcCallback.get().getSum(); + } + }, MultiplyAndMerge.class.getName(), Bytes.toBytes(2)); + pool.shutdown(); + return result; + } + } Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 23090) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 23094) @@ -43,9 +43,17 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.TreeSet; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.RejectedExecutionHandler; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantReadWriteLock; import javax.management.ObjectName; @@ -93,6 +101,7 @@ import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.batch.ServerCallback; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.exceptions.OperationConflictException; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; @@ -152,8 +161,11 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceCall; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; @@ -518,6 +530,9 @@ private UserProvider userProvider; + //thread pool for batch coprocessor execution + protected java.util.concurrent.ExecutorService pool; + /** * Starts a HRegionServer at the default location * @@ -565,6 +580,26 @@ HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD); + int poolSize = conf.getInt("hbase.regionserver.batch.coprocessor.pool.size", Integer.MAX_VALUE); + if (poolSize <= 0) { + poolSize = Integer.MAX_VALUE; + } + final SynchronousQueue blockingQueue = new SynchronousQueue(); + RejectedExecutionHandler rejectHandler = new RejectedExecutionHandler() { + + @Override + public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { + try { + blockingQueue.put(r); + } catch (InterruptedException e) { + throw new RejectedExecutionException(e); + } + } + }; + pool = new ThreadPoolExecutor(1, poolSize, 60, TimeUnit.SECONDS, + blockingQueue, Threads.newDaemonThreadFactory("batch-coprocessor-"), rejectHandler); + ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); + // Server to handle client requests. String hostname = conf.get("hbase.regionserver.ipc.address", Strings.domainNamePointerToHostName(DNS.getDefaultHost( @@ -4547,4 +4582,72 @@ respBuilder.setResponse(openInfoList.size()); return respBuilder.build(); } + + @Override + public BatchCoprocessorServiceResponse execBatchService(RpcController controller, + BatchCoprocessorServiceRequest request) throws ServiceException { + try { + requestCount.increment(); + String callbackClassName = request.getServerCallbackName(); + Class clazz = Class.forName(callbackClassName); + Object obj = clazz.newInstance(); + if (!(obj instanceof ServerCallback)) { + throw new ServiceException("Incorrect server callback class"); + } + final ServerCallback callback = (ServerCallback) obj; + callback.init(request.getServerCallbackInitData()); + // instantiate the callback name. the callback will do the result merge in the + // server side. 
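+ // Descriptive note (not part of the original patch wording): each region call below is handed to + // the region server's shared batch-coprocessor thread pool, the endpoint is executed against its + // region via HRegion.execService, and the per-region result is merged into the single callback + // instance above, so ServerCallback implementations must tolerate concurrent update calls. Once + // every future has completed, the merged result from callback.getResult() is wrapped in a + // NameBytesPair and sent back to the client.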
+ List calls = request.getCallList(); + Map> futures = + new TreeMap>(Bytes.BYTES_COMPARATOR); + for(final BatchCoprocessorServiceCall call : calls) { + Future future = pool.submit(new Callable(){ + + @Override + public Void call() throws Exception { + HRegion region = getRegion(call.getRegion()); + // ignore the passed in controller (from the serialized call) + ServerRpcController execController = new ServerRpcController(); + Message result = region.execService(execController, call.getCall()); + if (execController.getFailedOn() != null) { + throw execController.getFailedOn(); + } + callback.update(region.getRegionName(), result); + return null; + } + + }); + futures.put(call.getCall().getRow().toByteArray(), future); + } + + for (Map.Entry> e : futures.entrySet()) { + try { + e.getValue().get(); + } catch (ExecutionException ee) { + LOG.warn("Error calling coprocessor service for row " + + Bytes.toStringBinary(e.getKey()), ee); + throw new ServiceException(ee); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + LOG.warn("Interrupted calling coprocessor service for row " + + Bytes.toStringBinary(e.getKey()), ie); + throw new ServiceException(ie); + } + } + + BatchCoprocessorServiceResponse.Builder builder = + BatchCoprocessorServiceResponse.newBuilder(); + Message result = callback.getResult(); + builder.setValue(builder.getValueBuilder().setName(result.getClass().getName()) + .setValue(result.toByteString())); + return builder.build(); + } catch (ClassNotFoundException e1) { + throw new ServiceException(e1); + } catch (InstantiationException e1) { + throw new ServiceException(e1); + } catch (IllegalAccessException e1) { + throw new ServiceException(e1); + } + } } Index: hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/batch/ServerCallback.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/batch/ServerCallback.java (revision 0) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/batch/ServerCallback.java (revision 23094) @@ -0,0 +1,55 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.coprocessor.batch; + +import java.io.IOException; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; + +/** + * A server-side callback used by a batch coprocessor service call. A single instance is created + * on each region server the batch touches; as the endpoint execution for each region on that + * server finishes, the callback merges that region's result, and the merged result is what is + * returned to the client. + */ +public interface ServerCallback { + + /** + * Merges the result of one region on the server side. + * It is invoked after each region's execution finishes. 
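+ * Implementations should be thread safe: region executions on a region server run in parallel + * and may invoke this method concurrently.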
+ * @param region + * @param message + * @throws IOException + */ + public void update(byte[] region, Message message) throws IOException; + + /** + * Initializes the ServerCallback with the initData. + * @param initData + */ + public void init(ByteString initData); + + /** + * Gets the result. + * This result is returned back to client. + * + * @return + */ + public Message getResult(); +} Index: hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java =================================================================== --- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java (revision 23090) +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java (revision 23094) @@ -53,6 +53,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; @@ -495,6 +497,12 @@ this.multiInvocationsCount.decrementAndGet(); } } + + @Override + public BatchCoprocessorServiceResponse execBatchService(RpcController controller, + BatchCoprocessorServiceRequest request) throws ServiceException { + throw new NotImplementedException(); + } } static ScanResponse doMetaScanResponse(final SortedMap> meta, Index: hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (revision 23090) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (revision 23094) @@ -86,6 +86,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.protobuf.generated.CellProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; @@ -2623,4 +2625,29 @@ } return result; } + + /** + * Executes a coprocessor service. + * + * @param client + * @param calls + * @param serverCallbackClassName + * @param initData + * @return + * @throws IOException + */ + public static BatchCoprocessorServiceResponse execBatchService( + final ClientService.BlockingInterface client, + final List calls, String serverCallbackClassName, + byte[] initData) throws IOException { + BatchCoprocessorServiceRequest request = BatchCoprocessorServiceRequest.newBuilder() + .addAllCall(calls).setServerCallbackName(serverCallbackClassName) + .setServerCallbackInitData(initData == null ? 
null : ByteString.copyFrom(initData)).build(); + try { + BatchCoprocessorServiceResponse response = client.execBatchService(null, request); + return response; + } catch (ServiceException se) { + throw getRemoteException(se); + } + } } Index: hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerCoprocessorRpcChannel.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerCoprocessorRpcChannel.java (revision 0) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerCoprocessorRpcChannel.java (revision 23094) @@ -0,0 +1,117 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BatchRegionServerCallable; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BatchCoprocessorServiceResponse; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.ZeroCopyLiteralByteString; + +/** + * Provides clients with an RPC connection to call coprocessor endpoint + * {@link com.google.protobuf.Service}s against a given region server. An instance of this class may + * be obtained by calling + * {@link org.apache.hadoop.hbase.client.coprocessor.BatchCoprocessorServiceClient#batchCoprocessorService} + * , but should normally only be used in creating a new {@link com.google.protobuf.Service} stub to + * call the endpoint methods. 
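+ * <p> + * Each row/region pair supplied at construction time becomes a BatchCoprocessorServiceCall, and + * all of them are sent to the target region server in a single ExecBatchService RPC; the + * server-side ServerCallback named by serverCallbackClassName merges the per-region responses, + * so the caller receives one merged result rather than one response per region.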
+ */ +public class RegionServerCoprocessorRpcChannel extends CoprocessorRpcChannel { + + private static Log LOG = LogFactory.getLog(RegionServerCoprocessorRpcChannel.class); + + protected final HConnection connection; + protected final TableName table; + protected RpcRetryingCallerFactory rpcFactory; + protected ServerName serverName; + protected List> rowRegionPairs; + protected final String serverCallbackClassName; + protected final byte[] initData; + + public RegionServerCoprocessorRpcChannel(HConnection conn, TableName table, + ServerName serverName, List> rowRegionPairs, + String serverCallbackClassName, byte[] initData) { + this.connection = conn; + this.table = table; + this.serverName = serverName; + this.rowRegionPairs = rowRegionPairs; + this.serverCallbackClassName = serverCallbackClassName; + this.initData = initData; + this.rpcFactory = RpcRetryingCallerFactory.instantiate(conn.getConfiguration()); + } + + @Override + protected Message callExecService(MethodDescriptor method, Message request, + Message responsePrototype) throws IOException { + if (LOG.isTraceEnabled()) { + LOG.trace("Call: " + method.getName() + ", " + request.toString()); + } + + final List batchCalls = new ArrayList(); + for (Pair rowRegionPair : rowRegionPairs) { + ClientProtos.CoprocessorServiceCall call = ClientProtos.CoprocessorServiceCall.newBuilder() + .setRow(ZeroCopyLiteralByteString.wrap(rowRegionPair.getFirst())) + .setServiceName(method.getService().getFullName()).setMethodName(method.getName()) + .setRequest(request.toByteString()).build(); + ClientProtos.BatchCoprocessorServiceCall batchCall = ClientProtos.BatchCoprocessorServiceCall + .newBuilder().setCall(call) + .setRegion(RequestConverter.buildRegionSpecifier(REGION_NAME, rowRegionPair.getSecond())) + .build(); + batchCalls.add(batchCall); + } + + BatchRegionServerCallable callable = new BatchRegionServerCallable( + connection, table, serverName) { + public BatchCoprocessorServiceResponse call() throws Exception { + return ProtobufUtil.execBatchService(getStub(), batchCalls, serverCallbackClassName, + initData); + } + }; + BatchCoprocessorServiceResponse result = rpcFactory + . newCaller().callWithRetries(callable); + Message response = null; + if (result.getValue().hasValue()) { + response = responsePrototype.newBuilderForType().mergeFrom(result.getValue().getValue()) + .build(); + } else { + response = responsePrototype.getDefaultInstanceForType(); + } + if (LOG.isTraceEnabled()) { + LOG.trace("Result is value=" + response); + } + return response; + } +} Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchRegionServerCallable.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchRegionServerCallable.java (revision 0) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchRegionServerCallable.java (revision 23094) @@ -0,0 +1,115 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.net.ConnectException; +import java.net.SocketTimeoutException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; + +/** + * Implementations of a batch call to a RegionServer and implement {@link #call()}. + * Passed to a {@link RpcRetryingCaller} so we retry on fail. + * @param the class that the ServerCallable handles + * + * @param + */ +public abstract class BatchRegionServerCallable implements RetryingCallable { + + static final Log LOG = LogFactory.getLog(BatchRegionServerCallable.class); + + protected final HConnection connection; + protected final TableName tableName; + protected final ServerName serverName; + protected ClientService.BlockingInterface stub; + protected final static int MIN_WAIT_DEAD_SERVER = 10000; + + /** + * @param connection + * Connection to use. + * @param tableName + * Table name to which row belongs. + * @param ServerName + * The server name we want to connect. + */ + public BatchRegionServerCallable(HConnection connection, TableName tableName, + ServerName serverName) { + this.connection = connection; + this.tableName = tableName; + this.serverName = serverName; + } + + /** + * Prepare for connection to the server. 
+ * @param reload this is not used in this implementation + * @throws IOException e + */ + @Override + public void prepare(boolean reload) throws IOException { + setStub(connection.getClient(serverName)); + } + + @Override + public void throwable(Throwable t, boolean retrying) { + if (t instanceof SocketTimeoutException || t instanceof ConnectException + || t instanceof RetriesExhaustedException || connection.isDeadServer(serverName)) { + // if thrown these exceptions, we clear all the cache entries that + // map to that slow/dead server; otherwise, let cache miss and ask + // hbase:meta again to find the new location + connection.clearCaches(serverName); + } + } + + @Override + public String getExceptionMessageAdditionalDetail() { + return "table '" + tableName + " in the server " + serverName; + } + + @Override + public long sleep(long pause, int tries) { + // Tries hasn't been bumped up yet so we use "tries + 1" to get right pause time + long sleep = ConnectionUtils.getPauseTime(pause, tries + 1); + if (sleep < MIN_WAIT_DEAD_SERVER && connection.isDeadServer(serverName)) { + sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f); + } + return sleep; + } + + protected ClientService.BlockingInterface getStub() { + return this.stub; + } + + void setStub(final ClientService.BlockingInterface stub) { + this.stub = stub; + } + + public TableName getTableName() { + return this.tableName; + } + + public ServerName getServerName() { + return this.serverName; + } + +} Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java (revision 23090) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java (revision 23094) @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.ServerName; /** @@ -69,4 +70,23 @@ public interface Callback { void update(byte[] region, byte[] row, R result); } + + /** + * Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)} result + * against each region server. + * + *
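BatchRegionServerCallable above is abstract; callers subclass it with a call() body and hand it to the retrying caller, exactly as RegionServerCoprocessorRpcChannel does earlier in this patch. A condensed sketch of that pattern, assuming the generic parameter stripped from the rendered diff is <T>, and that connection, tableName, server, batchCalls, callbackClassName and initData are in scope as in the channel code:

BatchRegionServerCallable<BatchCoprocessorServiceResponse> callable =
    new BatchRegionServerCallable<BatchCoprocessorServiceResponse>(connection, tableName, server) {
      @Override
      public BatchCoprocessorServiceResponse call() throws Exception {
        // getStub() is only valid after prepare() has been invoked by the retrying caller.
        return ProtobufUtil.execBatchService(getStub(), batchCalls, callbackClassName, initData);
      }
    };

BatchCoprocessorServiceResponse response = RpcRetryingCallerFactory
    .instantiate(connection.getConfiguration())
    .<BatchCoprocessorServiceResponse> newCaller()
    .callWithRetries(callable);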
+ * <p>
+ * When used with + * {@link org.apache.hadoop.hbase.client.coprocessor.BatchCoprocessorServiceClient# + * batchCoprocessorService(Class, HTable, byte[], Batch.Call, String, byte[], Batch.BatchCallback)} + * the implementation's {@link Batch.BatchCallback#update(ServerName, Object)} method will be + * called with the {@link Batch.Call#call(Object)} return value from each region server in the + * selected range. + *
+ * </p>
+ * + * @param the return type from the associated {@link Batch.Call#call(Object)} + */ + public interface BatchCallback { + void update(ServerName serverName, R result); + } } Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BatchCoprocessorServiceClient.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BatchCoprocessorServiceClient.java (revision 0) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BatchCoprocessorServiceClient.java (revision 23094) @@ -0,0 +1,285 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client.coprocessor; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NavigableMap; +import java.util.TreeMap; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.protobuf.Service; + +/** + * The client to execute the batch coprocessor service. + * The HTable.coprocessorService method establish the connections to each region, whereas the + * methods in BatchCoprocessorServiceClient establish the connections to each region server. + * This reduces the connections issued from the client side. + */ +public class BatchCoprocessorServiceClient { + private static final Log LOG = LogFactory.getLog(BatchCoprocessorServiceClient.class); + + /** + * + * Executes the batchCoprocessorService in all the region servers of the table. 
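Unlike the existing per-region Callback, BatchCallback is keyed by ServerName. A minimal sketch of a collecting callback, the same pattern the map-returning overloads below use internally (SumResponse is an illustrative result type):

final Map<ServerName, SumResponse> results =
    Collections.synchronizedMap(new TreeMap<ServerName, SumResponse>());

Batch.BatchCallback<SumResponse> callback = new Batch.BatchCallback<SumResponse>() {
  @Override
  public void update(ServerName serverName, SumResponse result) {
    // Called once per region server with that server's merged result.
    results.put(serverName, result);
  }
};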
+ * + * @param service the protocol buffer {@code Service} implementation to call + * @param table the table which the coprocessor is executed against + * @param pool the thread pool used in the BatchCoprocessorServiceClient + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method will be invoked once per table region, using the {@link Service} + * instance connected to that region. + * @param serverCallbackClassName the connection is established to region servers. + * When all the executions of regions in a region server are + * finished, the serverCallbackClassName will be instantiated + * and do the merge work in the server side. + * @param initData the initial data used in the serverCallbackClassName + * @return the map whose key is the server name and value is the result comes from the server + * @throws IOException + * @throws Throwable + */ + public static Map batchCoprocessorService( + final Class service, final HTable table, ExecutorService pool, + final Batch.Call callable, final String serverCallbackClassName, byte[] initData) + throws IOException, Throwable { + final Map results = Collections.synchronizedMap(new TreeMap()); + batchCoprocessorService(service, table, pool, callable, serverCallbackClassName, initData, + new Batch.BatchCallback() { + + @Override + public void update(ServerName serverName, R result) { + results.put(serverName, result); + } + }); + return results; + } + + /** + * Executes the batchCoprocessorService in all the region servers of the table. + * + * @param service the protocol buffer {@code Service} implementation to call + * @param table the table which the coprocessor is executed against + * @param pool the thread pool used in the BatchCoprocessorServiceClient + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method will be invoked once per table region, using the {@link Service} + * instance connected to that region. + * @param serverCallbackClassName the connection is established to region servers. + * When all the executions of regions in a region server are + * finished, the serverCallbackClassName will be instantiated + * and do the merge work in the server side. + * @param initData the initial data used in the serverCallbackClassName + * @param callback + * @throws IOException + * @throws Throwable + */ + public static void batchCoprocessorService(final Class service, + final HTable table, ExecutorService pool, final Batch.Call callable, + final String serverCallbackClassName, byte[] initData, + final Batch.BatchCallback callback) throws IOException, Throwable { + List> keyRanges = new ArrayList>(); + keyRanges.add(new Pair(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW)); + batchCoprocessorService(service, table, pool, keyRanges, callable, serverCallbackClassName, + initData, callback); + } + + /** + * Executes the batchCoprocessorService in the region servers where the keyRanges belong to. 
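Key ranges for the range-restricted overloads are plain (startRow, stopRow) pairs; an empty byte array is the open bound, matching the HConstants.EMPTY_START_ROW / HConstants.EMPTY_END_ROW pair the whole-table overload passes. For example (row keys are illustrative):

List<Pair<byte[], byte[]>> keyRanges = new ArrayList<Pair<byte[], byte[]>>();
// Rows from "row-0100" (inclusive) up to "row-0500".
keyRanges.add(new Pair<byte[], byte[]>(Bytes.toBytes("row-0100"), Bytes.toBytes("row-0500")));
// Rows from "row-0900" to the end of the table.
keyRanges.add(new Pair<byte[], byte[]>(Bytes.toBytes("row-0900"), HConstants.EMPTY_END_ROW));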
+ * + * @param service the protocol buffer {@code Service} implementation to call + * @param table the table which the coprocessor is executed against + * @param pool the thread pool used in the BatchCoprocessorServiceClient + * @param keyRanges the coprocessor is executed only in the region servers where this key ranges + * belong to + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method will be invoked once per table region, using the {@link Service} + * instance connected to that region. + * @param serverCallbackClassName the connection is established to region servers. + * When all the executions of regions in a region server are + * finished, the serverCallbackClassName will be instantiated + * and do the merge work in the server side. + * @param initData the initial data used in the serverCallbackClassName + * @return the map whose key is the server name and value is the result comes from the server + * @throws IOException + * @throws Throwable + */ + public static Map batchCoprocessorService( + final Class service, final HTable table, ExecutorService pool, + final List> keyRanges, final Batch.Call callable, + final String serverCallbackClassName, byte[] initData) throws IOException, Throwable { + final Map results = Collections.synchronizedMap(new TreeMap()); + batchCoprocessorService(service, table, pool, keyRanges, callable, serverCallbackClassName, + initData, new Batch.BatchCallback() { + + @Override + public void update(ServerName serverName, R result) { + results.put(serverName, result); + } + }); + return results; + } + + /** + * Executes the batchCoprocessorService in the region servers where the keyRanges belong to. + * + * @param service the protocol buffer {@code Service} implementation to call + * @param table the table which the coprocessor is executed against + * @param pool the thread pool used in the BatchCoprocessorServiceClient + * @param keyRanges the coprocessor is executed only in the region servers where this key ranges + * belong to + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method will be invoked once per table region, using the {@link Service} + * instance connected to that region. + * @param serverCallbackClassName the connection is established to region servers. + * When all the executions of regions in a region server are + * finished, the serverCallbackClassName will be instantiated + * and do the merge work in the server side. 
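The server-side half of the serverCallbackClassName contract is not part of this client-side hunk; the javadoc above only implies it. A sketch under that assumption of roughly what each region server would do with the request, where ServerCallback is the interface name used in the earlier sketch and execServiceOnRegion is a hypothetical helper standing in for the per-region endpoint dispatch:

// Hypothetical per-regionserver merge implied by the javadoc; exception handling omitted.
Message mergeOnRegionServer(BatchCoprocessorServiceRequest request) throws Exception {
  ServerCallback callback =
      (ServerCallback) Class.forName(request.getServerCallbackName()).newInstance();
  callback.init(request.hasServerCallbackInitData()
      ? request.getServerCallbackInitData() : null);
  for (BatchCoprocessorServiceCall call : request.getCallList()) {
    Message perRegionResult = execServiceOnRegion(call); // hypothetical: run the endpoint on that region
    callback.update(call.getRegion().getValue().toByteArray(), perRegionResult);
  }
  // The merged value is what travels back to the client as BatchCoprocessorServiceResponse.value.
  return callback.getResult();
}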
+ * @param initData the initial data used in the serverCallbackClassName + * @param callback + * @throws IOException + * @throws Throwable + */ + @SuppressWarnings("deprecation") + public static void batchCoprocessorService(final Class service, + final HTable table, ExecutorService pool, final List> keyRanges, + final Batch.Call callable, final String serverCallbackClassName, byte[] initData, + final Batch.BatchCallback callback) throws IOException, Throwable { + Map>> serverRegions = new HashMap>>(); + NavigableMap regions = table.getRegionLocations(); + for (Pair keyRange : keyRanges) { + byte[] start = keyRange.getFirst(); + byte[] end = keyRange.getSecond(); + for (Entry entry : regions.entrySet()) { + HRegionInfo region = entry.getKey(); + boolean include = false; + byte[] startKey = region.getStartKey(); + byte[] endKey = region.getEndKey(); + if (Bytes.compareTo(start, startKey) >= 0) { + if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) || Bytes.compareTo(start, endKey) < 0) { + include = true; + } + } else if (Bytes.equals(end, HConstants.EMPTY_END_ROW) + || Bytes.compareTo(startKey, end) <= 0) { + include = true; + } else { + break; // past stop + } + + if (include) { + ServerName server = entry.getValue(); + List> rowRegionPairs = serverRegions.get(server); + if (rowRegionPairs == null) { + rowRegionPairs = new ArrayList>(); + serverRegions.put(server, rowRegionPairs); + } + rowRegionPairs + .add(new Pair(region.getStartKey(), region.getRegionName())); + } + } + } + batchCoprocessorService(table.getName(), service, + HConnectionManager.getConnection(table.getConfiguration()), pool, serverRegions, callable, + serverCallbackClassName, initData, callback); + } + + /** + * Executes the batchCoprocessorService in the selected region servers. + * + * @param service the protocol buffer {@code Service} implementation to call + * @param table the table which the coprocessor is executed against + * @param connection the connection + * @param pool the thread pool used in the BatchCoprocessorServiceClient + * @param serverRegions the map of the server name and the pair of the region names and rows. + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method will be invoked once per table region, using the {@link Service} + * instance connected to that region. + * @param serverCallbackClassName the connection is established to region servers. + * When all the executions of regions in a region server are + * finished, the serverCallbackClassName will be instantiated + * and do the merge work in the server side. 
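The generic type parameters in the hunk above were lost when the diff was rendered; the grouping being built is presumably one entry per region server, each holding the (region start key, region name) pairs whose regions intersect the requested key ranges:

// Presumed declaration with generics restored (they were stripped in the rendered diff above).
Map<ServerName, List<Pair<byte[], byte[]>>> serverRegions =
    new HashMap<ServerName, List<Pair<byte[], byte[]>>>();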
+ * @param initData the initial data used in the serverCallbackClassName + * @param callback + * @throws IOException + * @throws Throwable + */ + protected static void batchCoprocessorService(TableName tableName, + final Class service, final HConnection connection, ExecutorService pool, + Map>> serverRegions, final Batch.Call callable, + final String serverCallbackClassName, byte[] initData, + final Batch.BatchCallback callback) throws IOException, Throwable { + Map> futures = new TreeMap>(); + for (Entry>> entry : serverRegions.entrySet()) { + final RegionServerCoprocessorRpcChannel channel = new RegionServerCoprocessorRpcChannel( + connection, tableName, entry.getKey(), entry.getValue(), serverCallbackClassName, initData); + final ServerName serverName = entry.getKey(); + Future future = pool.submit(new Callable() { + public R call() throws Exception { + T instance = ProtobufUtil.newServiceStub(service, channel); + R result = callable.call(instance); + if (callback != null) { + callback.update(serverName, result); + } + return result; + } + }); + futures.put(serverName.getHostAndPort(), future); + } + for (Map.Entry> e : futures.entrySet()) { + try { + e.getValue().get(); + } catch (ExecutionException ee) { + LOG.warn( + "Error calling coprocessor service " + service.getName() + " in the server " + + e.getKey(), ee); + throw ee.getCause(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException("Interrupted calling coprocessor service " + + service.getName() + " in the server " + e.getKey()).initCause(ie); + } + } + } +}
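Putting the pieces together, a hypothetical end-to-end invocation against a deployed aggregation endpoint. ExampleProtos.SumService (with an asynchronous getSum(RpcController, SumRequest, RpcCallback<SumResponse>) method), SumRequest, SumResponse and the SumServerCallback sketched earlier are illustrative names, not part of this patch; ServerRpcController and BlockingRpcCallback are the stock org.apache.hadoop.hbase.ipc helpers normally used to call coprocessor endpoint stubs.

// Sums a table by issuing one batch RPC per region server, merging per-region counts server-side.
static long sumTable(Configuration conf) throws Throwable {
  HTable table = new HTable(conf, "mytable");            // assumed existing table
  ExecutorService pool = Executors.newFixedThreadPool(4);
  try {
    Map<ServerName, SumResponse> perServer =
        BatchCoprocessorServiceClient.batchCoprocessorService(
            ExampleProtos.SumService.class, table, pool,
            new Batch.Call<ExampleProtos.SumService, SumResponse>() {
              @Override
              public SumResponse call(ExampleProtos.SumService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<SumResponse> done = new BlockingRpcCallback<SumResponse>();
                instance.getSum(controller, SumRequest.getDefaultInstance(), done);
                SumResponse resp = done.get();
                if (controller.failedOnException()) {
                  throw controller.getFailedOn();
                }
                return resp;
              }
            },
            SumServerCallback.class.getName(), /* initData */ null);
    long total = 0;
    for (SumResponse r : perServer.values()) {
      total += r.getSum();
    }
    return total;
  } finally {
    pool.shutdown();
    table.close();
  }
}

With this path one RPC goes to each region server that hosts an intersecting region, rather than one per region as with HTable.coprocessorService, which is the connection reduction the class javadoc describes.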