diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java index 607764c..2eba07f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -18,55 +18,134 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.SortedSet; import java.util.TreeSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.WrongRegionException; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiRowMutationService; + +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; /** * This class demonstrates how to implement atomic multi row transactions using * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)} * and Coprocessor endpoints. + * + * Defines a protocol to perform multi row transactions. + * See {@link MultiRowMutationEndpoint} for the implementation. + *
+ * See
+ * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)}
+ * for details and limitations.
+ * <br>
+ * Example:
+ * <pre>
+ * Put p1 = new Put(row1);
+ * Put p2 = new Put(row2);
+ * ...
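+ * // wrap each client-side Put in a protocol buffer Mutate message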
+ * Mutate m1 = ProtobufUtil.toMutate(MutateType.PUT, p1);
+ * Mutate m2 = ProtobufUtil.toMutate(MutateType.PUT, p2);
+ * MultiMutateRequest.Builder mrmBuilder = MultiMutateRequest.newBuilder();
+ * mrmBuilder.addMutatationRequest(m1);
+ * mrmBuilder.addMutatationRequest(m2);
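+ * // get a coprocessor RPC channel for the region hosting ROW, then a blocking stub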
+ * CoprocessorRpcChannel channel = t.coprocessorService(ROW);
+ * MultiRowMutationService.BlockingInterface service = 
+ *    MultiRowMutationService.newBlockingStub(channel);
+ * MultiMutateRequest mrm = mrmBuilder.build();
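+ * // apply the request atomically: either every mutation commits or none does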
+ * service.mutateRows(null, mrm);
+ * </pre>
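+ * <p>
+ * The blocking stub surfaces remote failures as
+ * {@code com.google.protobuf.ServiceException}. A minimal sketch of
+ * unwrapping the IOException raised by the endpoint (assuming
+ * {@code ProtobufUtil#getRemoteException} is available in this codebase):
+ * <pre>
+ * try {
+ *   service.mutateRows(null, mrm);
+ * } catch (ServiceException se) {
+ *   throw ProtobufUtil.getRemoteException(se);
+ * }
+ * </pre>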
*/ @InterfaceAudience.Public @InterfaceStability.Evolving -public class MultiRowMutationEndpoint extends BaseEndpointCoprocessor implements - MultiRowMutationProtocol { - +public class MultiRowMutationEndpoint extends MultiRowMutationService implements +CoprocessorService, Coprocessor { + private RegionCoprocessorEnvironment env; @Override - public void mutateRows(List mutations) throws IOException { - // get the coprocessor environment - RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) getEnvironment(); - - // set of rows to lock, sorted to avoid deadlocks - SortedSet rowsToLock = new TreeSet(Bytes.BYTES_COMPARATOR); + public void mutateRows(RpcController controller, MultiMutateRequest request, + RpcCallback done) { + MultiMutateResponse response = MultiMutateResponse.getDefaultInstance(); + try { + // set of rows to lock, sorted to avoid deadlocks + SortedSet rowsToLock = new TreeSet(Bytes.BYTES_COMPARATOR); + List mutateRequestList = request.getMutatationRequestList(); + List mutations = new ArrayList(mutateRequestList.size()); + for (Mutate m : mutateRequestList) { + mutations.add(ProtobufUtil.toMutation(m)); + } - HRegionInfo regionInfo = env.getRegion().getRegionInfo(); - for (Mutation m : mutations) { - // check whether rows are in range for this region - if (!HRegion.rowIsInRange(regionInfo, m.getRow())) { - String msg = "Requested row out of range '" - + Bytes.toStringBinary(m.getRow()) + "'"; - if (rowsToLock.isEmpty()) { - // if this is the first row, region might have moved, - // allow client to retry - throw new WrongRegionException(msg); - } else { - // rows are split between regions, do not retry - throw new DoNotRetryIOException(msg); + HRegionInfo regionInfo = env.getRegion().getRegionInfo(); + for (Mutation m : mutations) { + // check whether rows are in range for this region + if (!HRegion.rowIsInRange(regionInfo, m.getRow())) { + String msg = "Requested row out of range '" + + Bytes.toStringBinary(m.getRow()) + "'"; + if (rowsToLock.isEmpty()) { + // if this is the first row, region might have moved, + // allow client to retry + throw new WrongRegionException(msg); + } else { + // rows are split between regions, do not retry + throw new DoNotRetryIOException(msg); + } } + rowsToLock.add(m.getRow()); } - rowsToLock.add(m.getRow()); + // call utility method on region + env.getRegion().mutateRowsWithLocks(mutations, rowsToLock); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } - // call utility method on region - env.getRegion().mutateRowsWithLocks(mutations, rowsToLock); + done.run(response); + } + + + @Override + public Service getService() { + return this; + } + + /** + * Stores a reference to the coprocessor environment provided by the + * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded + * on a table region, so always expects this to be an instance of + * {@link RegionCoprocessorEnvironment}. 
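+ * <p>
+ * For illustration, the endpoint is typically enabled cluster-wide through
+ * the region coprocessor host (a sketch; loading it per-table via the table
+ * descriptor works as well):
+ * <pre>
+ * Configuration conf = HBaseConfiguration.create();
+ * conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ *     MultiRowMutationEndpoint.class.getName());
+ * </pre>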
+ * @param env the environment provided by the coprocessor host + * @throws IOException if the provided environment is not an instance of + * {@code RegionCoprocessorEnvironment} + */ + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (env instanceof RegionCoprocessorEnvironment) { + this.env = (RegionCoprocessorEnvironment)env; + } else { + throw new CoprocessorException("Must be loaded on a table region!"); + } + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + // nothing to do } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationProtocol.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationProtocol.java deleted file mode 100644 index 2913582..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationProtocol.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.coprocessor; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; -import org.apache.hadoop.hbase.regionserver.HRegion; - -/** - * Defines a protocol to perform multi row transactions. - * See {@link MultiRowMutationEndpoint} for the implementation. - *
- * See
- * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)}
- * for details and limitations.
- * <br>
- * Example:
- * <pre>
- * List<Mutation> mutations = ...;
- * Put p1 = new Put(row1);
- * Put p2 = new Put(row2);
- * ...
- * mutations.add(p1);
- * mutations.add(p2);
- * MultiRowMutationProtocol mrOp = t.coprocessorProxy(
- *   MultiRowMutationProtocol.class, row1);
- * mrOp.mutateRows(mutations);
- * </pre>
- */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface MultiRowMutationProtocol extends CoprocessorProtocol { - public void mutateRows(List mutations) throws IOException; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index a0ee6c0..66b0ecf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -531,6 +531,27 @@ public final class ProtobufUtil { } /** + * Convert a MutateRequest to Mutation + * + * @param proto the protocol buffer Mutate to convert + * @return the converted Mutation + * @throws IOException + */ + public static Mutation toMutation(final Mutate proto) throws IOException { + MutateType type = proto.getMutateType(); + if (type == MutateType.APPEND) { + return toAppend(proto); + } + if (type == MutateType.DELETE) { + return toDelete(proto); + } + if (type == MutateType.PUT) { + return toPut(proto); + } + throw new IOException("Not an understood mutate type " + type); + } + + /** * Convert a protocol buffer Mutate to an Increment * * @param proto the protocol buffer Mutate to convert diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutation.java new file mode 100644 index 0000000..142723a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutation.java @@ -0,0 +1,1184 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: MultiRowMutation.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class MultiRowMutation { + private MultiRowMutation() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface MultiMutateRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .Mutate mutatationRequest = 1; + java.util.List + getMutatationRequestList(); + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate getMutatationRequest(int index); + int getMutatationRequestCount(); + java.util.List + getMutatationRequestOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateOrBuilder getMutatationRequestOrBuilder( + int index); + } + public static final class MultiMutateRequest extends + com.google.protobuf.GeneratedMessage + implements MultiMutateRequestOrBuilder { + // Use MultiMutateRequest.newBuilder() to construct. 
+ private MultiMutateRequest(Builder builder) { + super(builder); + } + private MultiMutateRequest(boolean noInit) {} + + private static final MultiMutateRequest defaultInstance; + public static MultiMutateRequest getDefaultInstance() { + return defaultInstance; + } + + public MultiMutateRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateRequest_fieldAccessorTable; + } + + // repeated .Mutate mutatationRequest = 1; + public static final int MUTATATIONREQUEST_FIELD_NUMBER = 1; + private java.util.List mutatationRequest_; + public java.util.List getMutatationRequestList() { + return mutatationRequest_; + } + public java.util.List + getMutatationRequestOrBuilderList() { + return mutatationRequest_; + } + public int getMutatationRequestCount() { + return mutatationRequest_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate getMutatationRequest(int index) { + return mutatationRequest_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateOrBuilder getMutatationRequestOrBuilder( + int index) { + return mutatationRequest_.get(index); + } + + private void initFields() { + mutatationRequest_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getMutatationRequestCount(); i++) { + if (!getMutatationRequest(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < mutatationRequest_.size(); i++) { + output.writeMessage(1, mutatationRequest_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < mutatationRequest_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, mutatationRequest_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest) obj; + + boolean result = true; + result = result && getMutatationRequestList() + .equals(other.getMutatationRequestList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); 
+ return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getMutatationRequestCount() > 0) { + hash = (37 * hash) + MUTATATIONREQUEST_FIELD_NUMBER; + hash = (53 * hash) + getMutatationRequestList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getMutatationRequestFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (mutatationRequestBuilder_ == null) { + mutatationRequest_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + mutatationRequestBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest(this); + int from_bitField0_ = bitField0_; + if (mutatationRequestBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + mutatationRequest_ = java.util.Collections.unmodifiableList(mutatationRequest_); + bitField0_ = 
(bitField0_ & ~0x00000001); + } + result.mutatationRequest_ = mutatationRequest_; + } else { + result.mutatationRequest_ = mutatationRequestBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.getDefaultInstance()) return this; + if (mutatationRequestBuilder_ == null) { + if (!other.mutatationRequest_.isEmpty()) { + if (mutatationRequest_.isEmpty()) { + mutatationRequest_ = other.mutatationRequest_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMutatationRequestIsMutable(); + mutatationRequest_.addAll(other.mutatationRequest_); + } + onChanged(); + } + } else { + if (!other.mutatationRequest_.isEmpty()) { + if (mutatationRequestBuilder_.isEmpty()) { + mutatationRequestBuilder_.dispose(); + mutatationRequestBuilder_ = null; + mutatationRequest_ = other.mutatationRequest_; + bitField0_ = (bitField0_ & ~0x00000001); + mutatationRequestBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getMutatationRequestFieldBuilder() : null; + } else { + mutatationRequestBuilder_.addAllMessages(other.mutatationRequest_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getMutatationRequestCount(); i++) { + if (!getMutatationRequest(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addMutatationRequest(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // repeated .Mutate mutatationRequest = 1; + private java.util.List mutatationRequest_ = + java.util.Collections.emptyList(); + private void ensureMutatationRequestIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + mutatationRequest_ = new java.util.ArrayList(mutatationRequest_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateOrBuilder> mutatationRequestBuilder_; + + public java.util.List getMutatationRequestList() { 
+ if (mutatationRequestBuilder_ == null) { + return java.util.Collections.unmodifiableList(mutatationRequest_); + } else { + return mutatationRequestBuilder_.getMessageList(); + } + } + public int getMutatationRequestCount() { + if (mutatationRequestBuilder_ == null) { + return mutatationRequest_.size(); + } else { + return mutatationRequestBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate getMutatationRequest(int index) { + if (mutatationRequestBuilder_ == null) { + return mutatationRequest_.get(index); + } else { + return mutatationRequestBuilder_.getMessage(index); + } + } + public Builder setMutatationRequest( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate value) { + if (mutatationRequestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutatationRequestIsMutable(); + mutatationRequest_.set(index, value); + onChanged(); + } else { + mutatationRequestBuilder_.setMessage(index, value); + } + return this; + } + public Builder setMutatationRequest( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder builderForValue) { + if (mutatationRequestBuilder_ == null) { + ensureMutatationRequestIsMutable(); + mutatationRequest_.set(index, builderForValue.build()); + onChanged(); + } else { + mutatationRequestBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addMutatationRequest(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate value) { + if (mutatationRequestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutatationRequestIsMutable(); + mutatationRequest_.add(value); + onChanged(); + } else { + mutatationRequestBuilder_.addMessage(value); + } + return this; + } + public Builder addMutatationRequest( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate value) { + if (mutatationRequestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutatationRequestIsMutable(); + mutatationRequest_.add(index, value); + onChanged(); + } else { + mutatationRequestBuilder_.addMessage(index, value); + } + return this; + } + public Builder addMutatationRequest( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder builderForValue) { + if (mutatationRequestBuilder_ == null) { + ensureMutatationRequestIsMutable(); + mutatationRequest_.add(builderForValue.build()); + onChanged(); + } else { + mutatationRequestBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addMutatationRequest( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder builderForValue) { + if (mutatationRequestBuilder_ == null) { + ensureMutatationRequestIsMutable(); + mutatationRequest_.add(index, builderForValue.build()); + onChanged(); + } else { + mutatationRequestBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllMutatationRequest( + java.lang.Iterable values) { + if (mutatationRequestBuilder_ == null) { + ensureMutatationRequestIsMutable(); + super.addAll(values, mutatationRequest_); + onChanged(); + } else { + mutatationRequestBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearMutatationRequest() { + if (mutatationRequestBuilder_ == null) { + mutatationRequest_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + 
mutatationRequestBuilder_.clear(); + } + return this; + } + public Builder removeMutatationRequest(int index) { + if (mutatationRequestBuilder_ == null) { + ensureMutatationRequestIsMutable(); + mutatationRequest_.remove(index); + onChanged(); + } else { + mutatationRequestBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder getMutatationRequestBuilder( + int index) { + return getMutatationRequestFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateOrBuilder getMutatationRequestOrBuilder( + int index) { + if (mutatationRequestBuilder_ == null) { + return mutatationRequest_.get(index); } else { + return mutatationRequestBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getMutatationRequestOrBuilderList() { + if (mutatationRequestBuilder_ != null) { + return mutatationRequestBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(mutatationRequest_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder addMutatationRequestBuilder() { + return getMutatationRequestFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder addMutatationRequestBuilder( + int index) { + return getMutatationRequestFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.getDefaultInstance()); + } + public java.util.List + getMutatationRequestBuilderList() { + return getMutatationRequestFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateOrBuilder> + getMutatationRequestFieldBuilder() { + if (mutatationRequestBuilder_ == null) { + mutatationRequestBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateOrBuilder>( + mutatationRequest_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + mutatationRequest_ = null; + } + return mutatationRequestBuilder_; + } + + // @@protoc_insertion_point(builder_scope:MultiMutateRequest) + } + + static { + defaultInstance = new MultiMutateRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MultiMutateRequest) + } + + public interface MultiMutateResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class MultiMutateResponse extends + com.google.protobuf.GeneratedMessage + implements MultiMutateResponseOrBuilder { + // Use MultiMutateResponse.newBuilder() to construct. 
+ private MultiMutateResponse(Builder builder) { + super(builder); + } + private MultiMutateResponse(boolean noInit) {} + + private static final MultiMutateResponse defaultInstance; + public static MultiMutateResponse getDefaultInstance() { + return defaultInstance; + } + + public MultiMutateResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.internal_static_MultiMutateResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + 
private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:MultiMutateResponse) + } + + static { + defaultInstance = new MultiMutateResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MultiMutateResponse) + } + + public static abstract class MultiRowMutationService + implements com.google.protobuf.Service { + protected MultiRowMutationService() {} + + public interface Interface { + public abstract void mutateRows( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new MultiRowMutationService() { + @java.lang.Override + public void mutateRows( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest request, + com.google.protobuf.RpcCallback done) { + impl.mutateRows(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.mutateRows(controller, (org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void mutateRows( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw 
new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.mutateRows(controller, (org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiRowMutationService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void mutateRows( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse mutateRows( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse mutateRows( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.getDefaultInstance()); + } + + } + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MultiMutateRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MultiMutateRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MultiMutateResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MultiMutateResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\026MultiRowMutation.proto\032\014Client.proto\"8" + + "\n\022MultiMutateRequest\022\"\n\021mutatationReques" + + "t\030\001 \003(\0132\007.Mutate\"\025\n\023MultiMutateResponse2" + + "R\n\027MultiRowMutationService\0227\n\nmutateRows" + + "\022\023.MultiMutateRequest\032\024.MultiMutateRespo" + + "nseBF\n*org.apache.hadoop.hbase.protobuf." + + "generatedB\020MultiRowMutationH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_MultiMutateRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_MultiMutateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MultiMutateRequest_descriptor, + new java.lang.String[] { "MutatationRequest", }, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest.Builder.class); + internal_static_MultiMutateResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_MultiMutateResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MultiMutateResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-server/src/main/protobuf/MultiRowMutation.proto b/hbase-server/src/main/protobuf/MultiRowMutation.proto new file mode 100644 index 0000000..ebd2c49 --- /dev/null +++ b/hbase-server/src/main/protobuf/MultiRowMutation.proto @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import "Client.proto"; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "MultiRowMutation"; +option java_generate_equals_and_hash = true; +option java_generic_services = true; +option optimize_for = SPEED; + +message MultiMutateRequest { + repeated Mutate mutatationRequest = 1; +} + +message MultiMutateResponse { +} + +service MultiRowMutationService { + rpc mutateRows(MultiMutateRequest) + returns(MultiMutateResponse); +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 2385212..685875e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -46,10 +46,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationProtocol; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; @@ -64,6 +64,12 @@ import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.filter.WhileMatchFilter; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiRowMutationService; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; @@ -4175,16 +4181,22 @@ public class TestFromClientSide { final byte [] ROW1 = Bytes.toBytes("testRow1"); HTable t = TEST_UTIL.createTable(TABLENAME, FAMILY); - List mrm = new ArrayList(); Put p = new Put(ROW); p.add(FAMILY, QUALIFIER, VALUE); - mrm.add(p); + Mutate m1 = ProtobufUtil.toMutate(MutateType.PUT, p); + p = new Put(ROW1); p.add(FAMILY, QUALIFIER, VALUE); - mrm.add(p); - MultiRowMutationProtocol 
mr = t.coprocessorProxy( - MultiRowMutationProtocol.class, ROW); - mr.mutateRows(mrm); + Mutate m2 = ProtobufUtil.toMutate(MutateType.PUT, p); + + MultiMutateRequest.Builder mrmBuilder = MultiMutateRequest.newBuilder(); + mrmBuilder.addMutatationRequest(m1); + mrmBuilder.addMutatationRequest(m2); + MultiMutateRequest mrm = mrmBuilder.build(); + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + service.mutateRows(null, mrm); Get g = new Get(ROW); Result r = t.get(g); assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)));
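For reference, a minimal end-to-end sketch of the client call path this patch introduces and the test above exercises. It is illustrative only, not part of the patch: the class and parameter names are invented, and both rows are assumed to live in the region hosting row1.

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiRowMutationService;

public class MultiRowMutationExample {
  /** Atomically applies one Put per row via the MultiRowMutationEndpoint. */
  static void atomicPuts(HTable t, byte[] row1, byte[] row2,
      byte[] fam, byte[] qual, byte[] val) throws Exception {
    Put p1 = new Put(row1);
    p1.add(fam, qual, val);
    Put p2 = new Put(row2);
    p2.add(fam, qual, val);

    // convert the client mutations to protobuf and collect them in one request
    MultiMutateRequest.Builder mrmBuilder = MultiMutateRequest.newBuilder();
    mrmBuilder.addMutatationRequest(ProtobufUtil.toMutate(MutateType.PUT, p1));
    mrmBuilder.addMutatationRequest(ProtobufUtil.toMutate(MutateType.PUT, p2));

    // route the call to the region hosting row1 and invoke the endpoint;
    // if row2 is not in that region the server fails the whole request
    CoprocessorRpcChannel channel = t.coprocessorService(row1);
    MultiRowMutationService.BlockingInterface service =
        MultiRowMutationService.newBlockingStub(channel);
    service.mutateRows(null, mrmBuilder.build());
  }
}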