Index: hbase-server/src/main/protobuf/Aggregate.proto
===================================================================
--- hbase-server/src/main/protobuf/Aggregate.proto	(revision 0)
+++ hbase-server/src/main/protobuf/Aggregate.proto	(revision 0)
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "AggregateProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "Client.proto";
+
+message AggregateArgument {
+  /** The argument passed to the AggregateService consists of three parts
+   *  (1) the (canonical) classname of the ColumnInterpreter implementation
+   *  (2) the Scan query
+   *  (3) any bytes required to construct the ColumnInterpreter object
+   *      properly
+   */
+  required string interpreterClassName = 1;
+  required Scan scan = 2;
+  optional bytes interpreterSpecificBytes = 3;
+}
+
+message AggregateResponse {
+  /**
+   * The AggregateService methods all have a response that either is a Pair
+   * or a simple object. When it is a Pair both firstPart and secondPart
+   * have defined values (and the secondPart is not present in the response
+   * when the response is not a pair). Refer to the AggregateImplementation
+   * class for an overview of the AggregateResponse object constructions.
+   */
+  repeated bytes firstPart = 1;
+  optional bytes secondPart = 2;
+}
+
+/** Refer to the AggregateImplementation class for an overview of the
+ *  AggregateService method implementations and their functionality.
+ */
+service AggregateService {
+  rpc getMax (AggregateArgument) returns (AggregateResponse);
+  rpc getMin (AggregateArgument) returns (AggregateResponse);
+  rpc getSum (AggregateArgument) returns (AggregateResponse);
+  rpc getRowNum (AggregateArgument) returns (AggregateResponse);
+  rpc getAvg (AggregateArgument) returns (AggregateResponse);
+  rpc getStd (AggregateArgument) returns (AggregateResponse);
+  rpc getMedian (AggregateArgument) returns (AggregateResponse);
+}
\ No newline at end of file
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java	(revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java	(revision 0)
@@ -0,0 +1,1829 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Aggregate.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class AggregateProtos { + private AggregateProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface AggregateArgumentOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string interpreterClassName = 1; + boolean hasInterpreterClassName(); + String getInterpreterClassName(); + + // required .Scan scan = 2; + boolean hasScan(); + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan(); + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder(); + + // optional bytes interpreterSpecificBytes = 3; + boolean hasInterpreterSpecificBytes(); + com.google.protobuf.ByteString getInterpreterSpecificBytes(); + } + public static final class AggregateArgument extends + com.google.protobuf.GeneratedMessage + implements AggregateArgumentOrBuilder { + // Use AggregateArgument.newBuilder() to construct. + private AggregateArgument(Builder builder) { + super(builder); + } + private AggregateArgument(boolean noInit) {} + + private static final AggregateArgument defaultInstance; + public static AggregateArgument getDefaultInstance() { + return defaultInstance; + } + + public AggregateArgument getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateArgument_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateArgument_fieldAccessorTable; + } + + private int bitField0_; + // required string interpreterClassName = 1; + public static final int INTERPRETERCLASSNAME_FIELD_NUMBER = 1; + private java.lang.Object interpreterClassName_; + public boolean hasInterpreterClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getInterpreterClassName() { + java.lang.Object ref = interpreterClassName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + interpreterClassName_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getInterpreterClassNameBytes() { + java.lang.Object ref = interpreterClassName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + interpreterClassName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .Scan scan = 2; + public static final int SCAN_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan scan_; + public boolean hasScan() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan() { + return scan_; + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder() { + return scan_; + } + + // optional bytes interpreterSpecificBytes = 3; + public static final int INTERPRETERSPECIFICBYTES_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString interpreterSpecificBytes_; + public boolean hasInterpreterSpecificBytes() 
{ + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getInterpreterSpecificBytes() { + return interpreterSpecificBytes_; + } + + private void initFields() { + interpreterClassName_ = ""; + scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance(); + interpreterSpecificBytes_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasInterpreterClassName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasScan()) { + memoizedIsInitialized = 0; + return false; + } + if (!getScan().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getInterpreterClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, scan_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, interpreterSpecificBytes_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getInterpreterClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, scan_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, interpreterSpecificBytes_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument other = (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument) obj; + + boolean result = true; + result = result && (hasInterpreterClassName() == other.hasInterpreterClassName()); + if (hasInterpreterClassName()) { + result = result && getInterpreterClassName() + .equals(other.getInterpreterClassName()); + } + result = result && (hasScan() == other.hasScan()); + if (hasScan()) { + result = result && getScan() + .equals(other.getScan()); + } + result = result && (hasInterpreterSpecificBytes() == other.hasInterpreterSpecificBytes()); + if (hasInterpreterSpecificBytes()) { + result = result && getInterpreterSpecificBytes() + .equals(other.getInterpreterSpecificBytes()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasInterpreterClassName()) { + hash = 
(37 * hash) + INTERPRETERCLASSNAME_FIELD_NUMBER; + hash = (53 * hash) + getInterpreterClassName().hashCode(); + } + if (hasScan()) { + hash = (37 * hash) + SCAN_FIELD_NUMBER; + hash = (53 * hash) + getScan().hashCode(); + } + if (hasInterpreterSpecificBytes()) { + hash = (37 * hash) + INTERPRETERSPECIFICBYTES_FIELD_NUMBER; + hash = (53 * hash) + getInterpreterSpecificBytes().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return 
newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgumentOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateArgument_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateArgument_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getScanFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + interpreterClassName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (scanBuilder_ == null) { + scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance(); + } else { + scanBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + interpreterSpecificBytes_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument build() { + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument result = new org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument(this); + int from_bitField0_ = bitField0_; + 
int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.interpreterClassName_ = interpreterClassName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (scanBuilder_ == null) { + result.scan_ = scan_; + } else { + result.scan_ = scanBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.interpreterSpecificBytes_ = interpreterSpecificBytes_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance()) return this; + if (other.hasInterpreterClassName()) { + setInterpreterClassName(other.getInterpreterClassName()); + } + if (other.hasScan()) { + mergeScan(other.getScan()); + } + if (other.hasInterpreterSpecificBytes()) { + setInterpreterSpecificBytes(other.getInterpreterSpecificBytes()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasInterpreterClassName()) { + + return false; + } + if (!hasScan()) { + + return false; + } + if (!getScan().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + interpreterClassName_ = input.readBytes(); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.newBuilder(); + if (hasScan()) { + subBuilder.mergeFrom(getScan()); + } + input.readMessage(subBuilder, extensionRegistry); + setScan(subBuilder.buildPartial()); + break; + } + case 26: { + bitField0_ |= 0x00000004; + interpreterSpecificBytes_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string interpreterClassName = 1; + private java.lang.Object interpreterClassName_ = ""; + public boolean hasInterpreterClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getInterpreterClassName() { + java.lang.Object ref = interpreterClassName_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + interpreterClassName_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setInterpreterClassName(String value) { + if (value == null) { + throw new 
NullPointerException(); + } + bitField0_ |= 0x00000001; + interpreterClassName_ = value; + onChanged(); + return this; + } + public Builder clearInterpreterClassName() { + bitField0_ = (bitField0_ & ~0x00000001); + interpreterClassName_ = getDefaultInstance().getInterpreterClassName(); + onChanged(); + return this; + } + void setInterpreterClassName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + interpreterClassName_ = value; + onChanged(); + } + + // required .Scan scan = 2; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder> scanBuilder_; + public boolean hasScan() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan() { + if (scanBuilder_ == null) { + return scan_; + } else { + return scanBuilder_.getMessage(); + } + } + public Builder setScan(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan value) { + if (scanBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + scan_ = value; + onChanged(); + } else { + scanBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setScan( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder builderForValue) { + if (scanBuilder_ == null) { + scan_ = builderForValue.build(); + onChanged(); + } else { + scanBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeScan(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan value) { + if (scanBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + scan_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance()) { + scan_ = + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.newBuilder(scan_).mergeFrom(value).buildPartial(); + } else { + scan_ = value; + } + onChanged(); + } else { + scanBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearScan() { + if (scanBuilder_ == null) { + scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance(); + onChanged(); + } else { + scanBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder getScanBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getScanFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder() { + if (scanBuilder_ != null) { + return scanBuilder_.getMessageOrBuilder(); + } else { + return scan_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder> + getScanFieldBuilder() { + if (scanBuilder_ == null) { + scanBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder, 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder>( + scan_, + getParentForChildren(), + isClean()); + scan_ = null; + } + return scanBuilder_; + } + + // optional bytes interpreterSpecificBytes = 3; + private com.google.protobuf.ByteString interpreterSpecificBytes_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasInterpreterSpecificBytes() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getInterpreterSpecificBytes() { + return interpreterSpecificBytes_; + } + public Builder setInterpreterSpecificBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + interpreterSpecificBytes_ = value; + onChanged(); + return this; + } + public Builder clearInterpreterSpecificBytes() { + bitField0_ = (bitField0_ & ~0x00000004); + interpreterSpecificBytes_ = getDefaultInstance().getInterpreterSpecificBytes(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:AggregateArgument) + } + + static { + defaultInstance = new AggregateArgument(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AggregateArgument) + } + + public interface AggregateResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated bytes firstPart = 1; + java.util.List getFirstPartList(); + int getFirstPartCount(); + com.google.protobuf.ByteString getFirstPart(int index); + + // optional bytes secondPart = 2; + boolean hasSecondPart(); + com.google.protobuf.ByteString getSecondPart(); + } + public static final class AggregateResponse extends + com.google.protobuf.GeneratedMessage + implements AggregateResponseOrBuilder { + // Use AggregateResponse.newBuilder() to construct. 
+ private AggregateResponse(Builder builder) { + super(builder); + } + private AggregateResponse(boolean noInit) {} + + private static final AggregateResponse defaultInstance; + public static AggregateResponse getDefaultInstance() { + return defaultInstance; + } + + public AggregateResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateResponse_fieldAccessorTable; + } + + private int bitField0_; + // repeated bytes firstPart = 1; + public static final int FIRSTPART_FIELD_NUMBER = 1; + private java.util.List firstPart_; + public java.util.List + getFirstPartList() { + return firstPart_; + } + public int getFirstPartCount() { + return firstPart_.size(); + } + public com.google.protobuf.ByteString getFirstPart(int index) { + return firstPart_.get(index); + } + + // optional bytes secondPart = 2; + public static final int SECONDPART_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString secondPart_; + public boolean hasSecondPart() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getSecondPart() { + return secondPart_; + } + + private void initFields() { + firstPart_ = java.util.Collections.emptyList();; + secondPart_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < firstPart_.size(); i++) { + output.writeBytes(1, firstPart_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(2, secondPart_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < firstPart_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(firstPart_.get(i)); + } + size += dataSize; + size += 1 * getFirstPartList().size(); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, secondPart_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse other = (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) obj; + + boolean result = true; + result = result && 
getFirstPartList() + .equals(other.getFirstPartList()); + result = result && (hasSecondPart() == other.hasSecondPart()); + if (hasSecondPart()) { + result = result && getSecondPart() + .equals(other.getSecondPart()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getFirstPartCount() > 0) { + hash = (37 * hash) + FIRSTPART_FIELD_NUMBER; + hash = (53 * hash) + getFirstPartList().hashCode(); + } + if (hasSecondPart()) { + hash = (37 * hash) + SECONDPART_FIELD_NUMBER; + hash = (53 * hash) + getSecondPart().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.internal_static_AggregateResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + firstPart_ = java.util.Collections.emptyList();; + bitField0_ = (bitField0_ & ~0x00000001); + secondPart_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse result = new 
org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + firstPart_ = java.util.Collections.unmodifiableList(firstPart_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.firstPart_ = firstPart_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.secondPart_ = secondPart_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()) return this; + if (!other.firstPart_.isEmpty()) { + if (firstPart_.isEmpty()) { + firstPart_ = other.firstPart_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFirstPartIsMutable(); + firstPart_.addAll(other.firstPart_); + } + onChanged(); + } + if (other.hasSecondPart()) { + setSecondPart(other.getSecondPart()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + ensureFirstPartIsMutable(); + firstPart_.add(input.readBytes()); + break; + } + case 18: { + bitField0_ |= 0x00000002; + secondPart_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // repeated bytes firstPart = 1; + private java.util.List firstPart_ = java.util.Collections.emptyList();; + private void ensureFirstPartIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + firstPart_ = new java.util.ArrayList(firstPart_); + bitField0_ |= 0x00000001; + } + } + public java.util.List + getFirstPartList() { + return java.util.Collections.unmodifiableList(firstPart_); + } + public int getFirstPartCount() { + return firstPart_.size(); + } + public com.google.protobuf.ByteString getFirstPart(int index) { + return firstPart_.get(index); + } + public Builder setFirstPart( + int index, com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFirstPartIsMutable(); + firstPart_.set(index, value); + onChanged(); + return this; + } + public Builder addFirstPart(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFirstPartIsMutable(); + firstPart_.add(value); + onChanged(); + return this; + } + public Builder addAllFirstPart( + java.lang.Iterable values) { + 
ensureFirstPartIsMutable(); + super.addAll(values, firstPart_); + onChanged(); + return this; + } + public Builder clearFirstPart() { + firstPart_ = java.util.Collections.emptyList();; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + // optional bytes secondPart = 2; + private com.google.protobuf.ByteString secondPart_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasSecondPart() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getSecondPart() { + return secondPart_; + } + public Builder setSecondPart(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + secondPart_ = value; + onChanged(); + return this; + } + public Builder clearSecondPart() { + bitField0_ = (bitField0_ & ~0x00000002); + secondPart_ = getDefaultInstance().getSecondPart(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:AggregateResponse) + } + + static { + defaultInstance = new AggregateResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AggregateResponse) + } + + public static abstract class AggregateService + implements com.google.protobuf.Service { + protected AggregateService() {} + + public interface Interface { + public abstract void getMax( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getMin( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getSum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getRowNum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getAvg( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getStd( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getMedian( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new AggregateService() { + @java.lang.Override + public void getMax( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + impl.getMax(controller, request, done); + } + + @java.lang.Override + public void getMin( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + impl.getMin(controller, request, done); + } + + @java.lang.Override + public void getSum( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + impl.getSum(controller, request, done); + } + + @java.lang.Override + public void getRowNum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + impl.getRowNum(controller, request, done); + } + + @java.lang.Override + public void getAvg( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + impl.getAvg(controller, request, done); + } + + @java.lang.Override + public void getStd( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + impl.getStd(controller, request, done); + } + + @java.lang.Override + public void getMedian( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + impl.getMedian(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.getMax(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request); + case 1: + return impl.getMin(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request); + case 2: + return impl.getSum(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request); + case 3: + return impl.getRowNum(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request); + case 4: + return impl.getAvg(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request); + case 5: + return impl.getStd(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request); + case 6: + return impl.getMedian(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return 
org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void getMax( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getMin( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getSum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getRowNum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getAvg( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getStd( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public abstract void getMedian( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.getMax(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.getMin(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.getSum(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 3: + this.getRowNum(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 4: + this.getAvg(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: + this.getStd(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 6: + this.getMedian(controller, (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.getDefaultInstance(); + default: + throw new 
java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void getMax( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance())); + } + + public void getMin( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance())); + } + + public void getSum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + 
org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance())); + } + + public void getRowNum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance())); + } + + public void getAvg( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance())); + } + + public void getStd( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance())); + } + + public void getMedian( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getMax( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getMin( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException; + + public 
org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getSum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getRowNum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getAvg( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getStd( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getMedian( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getMax( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getMin( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getSum( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getRowNum( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getAvg( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getStd( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse getMedian( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.getDefaultInstance()); + } + + } + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AggregateArgument_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AggregateArgument_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AggregateResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AggregateResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\017Aggregate.proto\032\014Client.proto\"h\n\021Aggre" + + "gateArgument\022\034\n\024interpreterClassName\030\001 \002" + + "(\t\022\023\n\004scan\030\002 \002(\0132\005.Scan\022 \n\030interpreterSp" + + "ecificBytes\030\003 \001(\014\":\n\021AggregateResponse\022\021" + + "\n\tfirstPart\030\001 \003(\014\022\022\n\nsecondPart\030\002 \001(\0142\366\002" + + "\n\020AggregateService\0220\n\006getMax\022\022.Aggregate" + + "Argument\032\022.AggregateResponse\0220\n\006getMin\022\022" + + ".AggregateArgument\032\022.AggregateResponse\0220" + + "\n\006getSum\022\022.AggregateArgument\032\022.Aggregate" + + "Response\0223\n\tgetRowNum\022\022.AggregateArgumen", + 
"t\032\022.AggregateResponse\0220\n\006getAvg\022\022.Aggreg" + + "ateArgument\032\022.AggregateResponse\0220\n\006getSt" + + "d\022\022.AggregateArgument\032\022.AggregateRespons" + + "e\0223\n\tgetMedian\022\022.AggregateArgument\032\022.Agg" + + "regateResponseBE\n*org.apache.hadoop.hbas" + + "e.protobuf.generatedB\017AggregateProtosH\001\210" + + "\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_AggregateArgument_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_AggregateArgument_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AggregateArgument_descriptor, + new java.lang.String[] { "InterpreterClassName", "Scan", "InterpreterSpecificBytes", }, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument.Builder.class); + internal_static_AggregateResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_AggregateResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AggregateResponse_descriptor, + new java.lang.String[] { "FirstPart", "SecondPart", }, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java (revision 1396131) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java (working copy) @@ -18,9 +18,8 @@ */ package org.apache.hadoop.hbase.client.coprocessor; -import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; +import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -28,6 +27,8 @@ import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; import org.apache.hadoop.hbase.util.Bytes; +import com.google.protobuf.ByteString; + /** * a concrete column interpreter implementation. The cell value is a Long value * and its promoted data type is also a Long value. For computing aggregation @@ -86,24 +87,55 @@ } @Override - public void readFields(DataInput arg0) throws IOException { - // nothing to serialize + public double divideForAvg(Long l1, Long l2) { + return (l2 == null || l1 == null) ? 
Double.NaN : (l1.doubleValue() / l2 + .doubleValue()); } @Override - public void write(DataOutput arg0) throws IOException { - // nothing to serialize + public Long castToReturnType(Long o) { + return o; } + @Override - public double divideForAvg(Long l1, Long l2) { - return (l2 == null || l1 == null) ? Double.NaN : (l1.doubleValue() / l2 - .doubleValue()); + public Long parseResponseAsPromotedType(byte[] response) { + ByteBuffer b = ByteBuffer.allocate(8).put(response); + b.rewind(); + long l = b.getLong(); + return l; } @Override - public Long castToReturnType(Long o) { - return o; + public Long castToCellType(Long l) { + return l; } -} + @Override + public ByteString columnInterpreterSpecificData() { + // nothing + return null; + } + + @Override + public void initialize(ByteString bytes) { + // nothing + } + + @Override + public ByteString getProtoForCellType(Long t) { + return getProtoForPromotedOrCellType(t); + } + + @Override + public ByteString getProtoForPromotedType(Long s) { + return getProtoForPromotedOrCellType(s); + } + + private ByteString getProtoForPromotedOrCellType(Long s) { + ByteBuffer bb = ByteBuffer.allocate(8).putLong(s); + bb.rewind(); + ByteString bs = ByteString.copyFrom(bb); + return bs; + } +} \ No newline at end of file Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java (revision 1396131) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java (working copy) @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client.coprocessor; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -39,16 +40,23 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.coprocessor.AggregateProtocol; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService; import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; +import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; +import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import com.google.protobuf.ByteString; + /** * This client class is for invoking the aggregate functions deployed on the - * Region Server side via the AggregateProtocol. This class will implement the + * Region Server side via the AggregateService. This class will implement the * supporting functionality for summing/processing the individual results - * obtained from the AggregateProtocol for each region. + * obtained from the AggregateService for each region. *

* This will serve as the client side handler for invoking the aggregate * functions. @@ -92,7 +100,7 @@ */ public R max(final byte[] tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - validateParameters(scan); + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); class MaxCallBack implements Batch.Callback { R max = null; @@ -109,11 +117,24 @@ HTable table = null; try { table = new HTable(conf, tableName); - table.coprocessorExec(AggregateProtocol.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { @Override - public R call(AggregateProtocol instance) throws IOException { - return instance.getMax(ci, scan); + public R call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getMax(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + if (response.getFirstPartCount() > 0) { + return ci.castToCellType( + ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0)))); + } + return null; } }, aMaxCallBack); } finally { @@ -149,7 +170,7 @@ */ public R min(final byte[] tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - validateParameters(scan); + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); class MinCallBack implements Batch.Callback { private R min = null; @@ -167,12 +188,25 @@ HTable table = null; try { table = new HTable(conf, tableName); - table.coprocessorExec(AggregateProtocol.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { @Override - public R call(AggregateProtocol instance) throws IOException { - return instance.getMin(ci, scan); + public R call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getMin(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + if (response.getFirstPartCount() > 0) { + return ci.castToCellType( + ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0)))); + } + return null; } }, minCallBack); } finally { @@ -199,7 +233,7 @@ */ public long rowCount(final byte[] tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - validateParameters(scan); + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); class RowNumCallback implements Batch.Callback { private final AtomicLong rowCountL = new AtomicLong(0); @@ -216,11 +250,22 @@ HTable table = null; try { table = new HTable(conf, tableName); - table.coprocessorExec(AggregateProtocol.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { @Override - public Long call(AggregateProtocol instance) throws IOException { - return instance.getRowNum(ci, scan); + public Long call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + 
BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getRowNum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); + ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); + bb.rewind(); + return bb.getLong(); } }, rowNum); } finally { @@ -242,7 +287,8 @@ */ public S sum(final byte[] tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - validateParameters(scan); + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + class SumCallBack implements Batch.Callback { S sumVal = null; @@ -259,11 +305,23 @@ HTable table = null; try { table = new HTable(conf, tableName); - table.coprocessorExec(AggregateProtocol.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { @Override - public S call(AggregateProtocol instance) throws IOException { - return instance.getSum(ci, scan); + public S call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getSum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + if (response.getFirstPartCount() == 0) { + return null; + } + return ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0))); } }, sumCallBack); } finally { @@ -284,7 +342,7 @@ */ private Pair getAvgArgs(final byte[] tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - validateParameters(scan); + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); class AvgCallBack implements Batch.Callback> { S sum = null; Long rowCount = 0l; @@ -303,13 +361,31 @@ HTable table = null; try { table = new HTable(conf, tableName); - table.coprocessorExec(AggregateProtocol.class, scan.getStartRow(), + table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { + new Batch.Call>() { @Override - public Pair call(AggregateProtocol instance) + public Pair call(AggregateService instance) throws IOException { - return instance.getAvg(ci, scan); + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getAvg(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + Pair pair = new Pair(null, 0L); + if (response.getFirstPartCount() == 0) { + return pair; + } + pair.setFirst(ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0)))); + ByteBuffer bb = ByteBuffer.allocate(8).put( + getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; } }, avgCallBack); } finally { @@ -351,7 +427,7 @@ */ private Pair, Long> getStdArgs(final byte[] tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - validateParameters(scan); + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); class StdCallback implements Batch.Callback, Long>> { long rowCountVal = 0l; S sumVal = null, sumSqVal = null; @@ -366,24 +442,48 @@ @Override public 
synchronized void update(byte[] region, byte[] row, Pair, Long> result) { - sumVal = ci.add(sumVal, result.getFirst().get(0)); - sumSqVal = ci.add(sumSqVal, result.getFirst().get(1)); - rowCountVal += result.getSecond(); + if (result.getFirst().size() > 0) { + sumVal = ci.add(sumVal, result.getFirst().get(0)); + sumSqVal = ci.add(sumSqVal, result.getFirst().get(1)); + rowCountVal += result.getSecond(); + } } } StdCallback stdCallback = new StdCallback(); HTable table = null; try { table = new HTable(conf, tableName); - table.coprocessorExec(AggregateProtocol.class, scan.getStartRow(), + table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call, Long>>() { + new Batch.Call, Long>>() { @Override - public Pair, Long> call(AggregateProtocol instance) + public Pair, Long> call(AggregateService instance) throws IOException { - return instance.getStd(ci, scan); + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getStd(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + Pair,Long> pair = + new Pair, Long>(new ArrayList(), 0L); + if (response.getFirstPartCount() == 0) { + return pair; + } + List list = new ArrayList(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + list.add(ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(i)))); + } + pair.setFirst(list); + ByteBuffer bb = ByteBuffer.allocate(8).put( + getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; } - }, stdCallback); } finally { if (table != null) { @@ -431,7 +531,7 @@ private Pair>, List> getMedianArgs(final byte[] tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - validateParameters(scan); + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); final NavigableMap> map = new TreeMap>(Bytes.BYTES_COMPARATOR); class StdCallback implements Batch.Callback> { @@ -457,11 +557,25 @@ HTable table = null; try { table = new HTable(conf, tableName); - table.coprocessorExec(AggregateProtocol.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call>() { + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call>() { @Override - public List call(AggregateProtocol instance) throws IOException { - return instance.getMedian(ci, scan); + public List call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getMedian(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + + List list = new ArrayList(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + list.add(ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(i)))); + } + return list; } }, stdCallback); @@ -557,4 +671,30 @@ } return null; } + + AggregateArgument validateArgAndGetPB(Scan scan, ColumnInterpreter ci) + throws IOException { + validateParameters(scan); + final AggregateArgument.Builder requestBuilder = + AggregateArgument.newBuilder(); + requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName()); + if (ci.columnInterpreterSpecificData() != null) { + 
requestBuilder.setInterpreterSpecificBytes( + ci.columnInterpreterSpecificData()); + } + requestBuilder.setScan(ProtobufUtil.toScan(scan)); + return requestBuilder.build(); + } + + byte[] getBytesFromResponse(ByteString response) { + ByteBuffer bb = response.asReadOnlyByteBuffer(); + bb.rewind(); + byte[] bytes; + if (bb.hasArray()) { + bytes = bb.array(); + } else { + bytes = response.toByteArray(); + } + return bytes; + } } Index: hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateProtocol.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateProtocol.java (revision 1396131) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateProtocol.java (working copy) @@ -1,147 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.coprocessor; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.coprocessor.AggregationClient; -import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; -import org.apache.hadoop.hbase.util.Pair; - -/** - * Defines the aggregation functions that are to be supported in this - * Coprocessor. For each method, it takes a Scan object and a columnInterpreter. - * The scan object should have a column family (else an exception will be - * thrown), and an optional column qualifier. In the current implementation - * {@link AggregateImplementation}, only one column family and column qualifier - * combination is served. In case there are more than one, only first one will - * be picked. Refer to {@link AggregationClient} for some general conditions on - * input parameters. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface AggregateProtocol extends CoprocessorProtocol { - public static final long VERSION = 1L; - - /** - * Gives the maximum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, maximum value for the - * entire column family will be returned. - * @param ci - * @param scan - * @return max value as mentioned above - * @throws IOException - */ - T getMax(ColumnInterpreter ci, Scan scan) throws IOException; - - /** - * Gives the minimum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. 
In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, minimum value for the - * entire column family will be returned. - * @param ci - * @param scan - * @return min as mentioned above - * @throws IOException - */ - T getMin(ColumnInterpreter ci, Scan scan) throws IOException; - - /** - * Gives the sum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, sum for the entire column - * family will be returned. - * @param ci - * @param scan - * @return sum of values as defined by the column interpreter - * @throws IOException - */ - S getSum(ColumnInterpreter ci, Scan scan) throws IOException; - - /** - * @param ci - * @param scan - * @return Row count for the given column family and column qualifier, in - * the given row range as defined in the Scan object. - * @throws IOException - */ - long getRowNum(ColumnInterpreter ci, Scan scan) - throws IOException; - - /** - * Gives a Pair with first object as Sum and second object as row count, - * computed for a given combination of column qualifier and column family in - * the given row range as defined in the Scan object. In its current - * implementation, it takes one column family and one column qualifier (if - * provided). In case of null column qualifier, an aggregate sum over all the - * entire column family will be returned. - *

- * The average is computed in - * {@link AggregationClient#avg(byte[], ColumnInterpreter, Scan)} by - * processing results from all regions, so its "ok" to pass sum and a Long - * type. - * @param ci - * @param scan - * @return Average - * @throws IOException - */ - <T, S> Pair<S, Long> getAvg(ColumnInterpreter<T, S> ci, Scan scan) - throws IOException; - - /** - * Gives a Pair with first object a List containing Sum and sum of squares, - * and the second object as row count. It is computed for a given combination of - * column qualifier and column family in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * one column qualifier (if provided). The idea is get the value of variance first: - * the average of the squares less the square of the average a standard - * deviation is square root of variance. - * @param ci - * @param scan - * @return STD - * @throws IOException - */ - <T, S> Pair<List<S>, Long> getStd(ColumnInterpreter<T, S> ci, Scan scan) - throws IOException; - - /** - * Gives a List containing sum of values and sum of weights. - * It is computed for the combination of column - * family and column qualifier(s) in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * two column qualifiers. The first qualifier is for values column and - * the second qualifier (optional) is for weight column. - * @param ci - * @param scan - * @return Pair - * @throws IOException - */ - <T, S> List<S> getMedian(ColumnInterpreter<T, S> ci, Scan scan) - throws IOException; - -} Index: hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java (revision 1396131) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java (working copy) @@ -25,8 +25,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter; -import org.apache.hadoop.io.Writable; +import com.google.protobuf.ByteString; + /** * Defines how value for specific column is interpreted and provides utility * methods like compare, add, multiply etc for them. Takes column family, column @@ -48,7 +49,7 @@ */ @InterfaceAudience.Public @InterfaceStability.Evolving -public interface ColumnInterpreter<T, S> extends Writable { +public interface ColumnInterpreter<T, S> { /** * @param colFamily @@ -114,4 +115,50 @@ * @return Average */ double divideForAvg(S o, Long l); -} + + /** + * This method should return any additional data that is needed on the + * server side to construct the ColumnInterpreter. The server + * will pass this to the {@link #initialize(org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.ColumnInterpreter)} + * method. If there is no ColumnInterpreter specific data (for e.g., + * {@link LongColumnInterpreter}) then null should be returned. + * @return the PB message + */ + ByteString columnInterpreterSpecificData(); + + /** + * Return the PB for type T + * @param t + * @return PB-message + */ + ByteString getProtoForCellType(T t); + + /** + * Return the PB for type S + * @param s + * @return PB-message + */ + ByteString getProtoForPromotedType(S s); + + /** + * This method should initialize any field(s) of the ColumnInterpreter with + * a parsing of the passed message bytes (used on the server side). 
+ * @param bytes + */ + void initialize(ByteString bytes); + + /** + * Converts the bytes in the server's response to the expected type S + * @param response + * @return response of type S constructed from the message + */ + S parseResponseAsPromotedType(byte[] response); + + /** + * The response message comes as type S. This will convert/cast it to T. + * In some sense, performs the opposite of {@link #castToReturnType(Object)} + * @param response + * @return cast + */ + T castToCellType(S response); +} \ No newline at end of file Index: hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java (revision 1396131) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java (working copy) @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.NavigableSet; @@ -27,45 +28,59 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.coprocessor.AggregationClient; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hbase.ipc.ProtocolSignature; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService; import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.util.Pair; +import com.google.protobuf.ByteString; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + /** * A concrete AggregateProtocol implementation. Its system level coprocessor * that computes the aggregate function at a region level. + * @param <T> + * @param <S> */ @InterfaceAudience.Public @InterfaceStability.Evolving -public class AggregateImplementation<T, S> extends BaseEndpointCoprocessor implements - AggregateProtocol<T, S> { +public class AggregateImplementation<T, S> extends AggregateService implements + CoprocessorService, Coprocessor { protected static Log log = LogFactory.getLog(AggregateImplementation.class); + private RegionCoprocessorEnvironment env; + /** + * Gives the maximum for a given combination of column qualifier and column + * family, in the given row range as defined in the Scan object. In its + * current implementation, it takes one column family and one column qualifier + * (if provided). In case of null column qualifier, maximum value for the + * entire column family will be returned. 
+ */ @Override - public ProtocolSignature getProtocolSignature( - String protocol, long version, int clientMethodsHashCode) - throws IOException { - if (AggregateProtocol.class.getName().equals(protocol)) { - return new ProtocolSignature(AggregateProtocol.VERSION, null); - } - throw new IOException("Unknown protocol: " + protocol); - } - - @Override - public T getMax(ColumnInterpreter ci, Scan scan) - throws IOException { - T temp; + public void getMax(RpcController controller, AggregateArgument request, + RpcCallback done) { + InternalScanner scanner = null; + AggregateResponse response = null; T max = null; - InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment()) - .getRegion().getScanner(scan); - List results = new ArrayList(); - byte[] colFamily = scan.getFamilies()[0]; - byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); - // qualifier can be null. try { + ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); + T temp; + Scan scan = ProtobufUtil.toScan(request.getScan()); + scanner = env.getRegion().getScanner(scan); + List results = new ArrayList(); + byte[] colFamily = scan.getFamilies()[0]; + byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); + // qualifier can be null. boolean hasMoreRows = false; do { hasMoreRows = scanner.next(results); @@ -75,26 +90,46 @@ } results.clear(); } while (hasMoreRows); + if (max != null) { + AggregateResponse.Builder builder = AggregateResponse.newBuilder(); + builder.addFirstPart(ci.getProtoForCellType(max)); + response = builder.build(); + } + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } finally { - scanner.close(); + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) {} + } } log.info("Maximum from this region is " - + ((RegionCoprocessorEnvironment) getEnvironment()).getRegion() - .getRegionNameAsString() + ": " + max); - return max; + + env.getRegion().getRegionNameAsString() + ": " + max); + done.run(response); } + /** + * Gives the minimum for a given combination of column qualifier and column + * family, in the given row range as defined in the Scan object. In its + * current implementation, it takes one column family and one column qualifier + * (if provided). In case of null column qualifier, minimum value for the + * entire column family will be returned. 
+ */ @Override - public T getMin(ColumnInterpreter ci, Scan scan) - throws IOException { + public void getMin(RpcController controller, AggregateArgument request, + RpcCallback done) { + AggregateResponse response = null; + InternalScanner scanner = null; T min = null; - T temp; - InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment()) - .getRegion().getScanner(scan); - List results = new ArrayList(); - byte[] colFamily = scan.getFamilies()[0]; - byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); try { + ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); + T temp; + Scan scan = ProtobufUtil.toScan(request.getScan()); + scanner = env.getRegion().getScanner(scan); + List results = new ArrayList(); + byte[] colFamily = scan.getFamilies()[0]; + byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); boolean hasMoreRows = false; do { hasMoreRows = scanner.next(results); @@ -104,27 +139,46 @@ } results.clear(); } while (hasMoreRows); + if (min != null) { + response = AggregateResponse.newBuilder().addFirstPart( + ci.getProtoForCellType(min)).build(); + } + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } finally { - scanner.close(); + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) {} + } } log.info("Minimum from this region is " - + ((RegionCoprocessorEnvironment) getEnvironment()).getRegion() - .getRegionNameAsString() + ": " + min); - return min; + + env.getRegion().getRegionNameAsString() + ": " + min); + done.run(response); } + /** + * Gives the sum for a given combination of column qualifier and column + * family, in the given row range as defined in the Scan object. In its + * current implementation, it takes one column family and one column qualifier + * (if provided). In case of null column qualifier, sum for the entire column + * family will be returned. 
+ */ @Override - public S getSum(ColumnInterpreter ci, Scan scan) - throws IOException { + public void getSum(RpcController controller, AggregateArgument request, + RpcCallback done) { + AggregateResponse response = null; + InternalScanner scanner = null; long sum = 0l; - S sumVal = null; - T temp; - InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment()) - .getRegion().getScanner(scan); - byte[] colFamily = scan.getFamilies()[0]; - byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); - List results = new ArrayList(); try { + ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); + S sumVal = null; + T temp; + Scan scan = ProtobufUtil.toScan(request.getScan()); + scanner = env.getRegion().getScanner(scan); + byte[] colFamily = scan.getFamilies()[0]; + byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); + List results = new ArrayList(); boolean hasMoreRows = false; do { hasMoreRows = scanner.next(results); @@ -135,27 +189,43 @@ } results.clear(); } while (hasMoreRows); + if (sumVal != null) { + response = AggregateResponse.newBuilder().addFirstPart( + ci.getProtoForPromotedType(sumVal)).build(); + } + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } finally { - scanner.close(); + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) {} + } } log.debug("Sum from this region is " - + ((RegionCoprocessorEnvironment) getEnvironment()).getRegion() - .getRegionNameAsString() + ": " + sum); - return sumVal; + + env.getRegion().getRegionNameAsString() + ": " + sum); + done.run(response); } + /** + * Gives the row count for the given column family and column qualifier, in + * the given row range as defined in the Scan object. + * @throws IOException + */ @Override - public long getRowNum(ColumnInterpreter ci, Scan scan) - throws IOException { + public void getRowNum(RpcController controller, AggregateArgument request, + RpcCallback done) { + AggregateResponse response = null; long counter = 0l; List results = new ArrayList(); - byte[] colFamily = scan.getFamilies()[0]; - byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); - if (scan.getFilter() == null && qualifier == null) - scan.setFilter(new FirstKeyOnlyFilter()); - InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment()) - .getRegion().getScanner(scan); + InternalScanner scanner = null; try { + Scan scan = ProtobufUtil.toScan(request.getScan()); + byte[] colFamily = scan.getFamilies()[0]; + byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); + if (scan.getFilter() == null && qualifier == null) + scan.setFilter(new FirstKeyOnlyFilter()); + scanner = env.getRegion().getScanner(scan); boolean hasMoreRows = false; do { hasMoreRows = scanner.next(results); @@ -164,27 +234,53 @@ } results.clear(); } while (hasMoreRows); + ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter); + bb.rewind(); + response = AggregateResponse.newBuilder().addFirstPart( + ByteString.copyFrom(bb)).build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } finally { - scanner.close(); + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) {} + } } log.info("Row counter from this region is " - + ((RegionCoprocessorEnvironment) getEnvironment()).getRegion() - .getRegionNameAsString() + ": " + counter); - return counter; + + env.getRegion().getRegionNameAsString() + ": " + counter); + done.run(response); } + /** + * Gives a Pair with 
first object as Sum and second object as row count, + * computed for a given combination of column qualifier and column family in + * the given row range as defined in the Scan object. In its current + * implementation, it takes one column family and one column qualifier (if + * provided). In case of null column qualifier, an aggregate sum over the + * entire column family will be returned. + *

+ * The average is computed in + * {@link AggregationClient#avg(byte[], ColumnInterpreter, Scan)} by + * processing results from all regions, so its "ok" to pass sum and a Long + * type. + */ @Override - public Pair getAvg(ColumnInterpreter ci, Scan scan) - throws IOException { - S sumVal = null; - Long rowCountVal = 0l; - InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment()) - .getRegion().getScanner(scan); - byte[] colFamily = scan.getFamilies()[0]; - byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); - List results = new ArrayList(); - boolean hasMoreRows = false; + public void getAvg(RpcController controller, AggregateArgument request, + RpcCallback done) { + AggregateResponse response = null; + InternalScanner scanner = null; try { + ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); + S sumVal = null; + Long rowCountVal = 0l; + Scan scan = ProtobufUtil.toScan(request.getScan()); + scanner = env.getRegion().getScanner(scan); + byte[] colFamily = scan.getFamilies()[0]; + byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); + List results = new ArrayList(); + boolean hasMoreRows = false; + do { results.clear(); hasMoreRows = scanner.next(results); @@ -194,26 +290,53 @@ } rowCountVal++; } while (hasMoreRows); + if (sumVal != null) { + ByteString first = ci.getProtoForPromotedType(sumVal); + AggregateResponse.Builder pair = AggregateResponse.newBuilder(); + pair.addFirstPart(first); + ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal); + bb.rewind(); + pair.setSecondPart(ByteString.copyFrom(bb)); + response = pair.build(); + } + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } finally { - scanner.close(); + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) {} + } } - Pair pair = new Pair(sumVal, rowCountVal); - return pair; + done.run(response); } + /** + * Gives a Pair with first object a List containing Sum and sum of squares, + * and the second object as row count. It is computed for a given combination of + * column qualifier and column family in the given row range as defined in the + * Scan object. In its current implementation, it takes one column family and + * one column qualifier (if provided). The idea is get the value of variance first: + * the average of the squares less the square of the average a standard + * deviation is square root of variance. 
+ */ @Override - public Pair, Long> getStd(ColumnInterpreter ci, Scan scan) - throws IOException { - S sumVal = null, sumSqVal = null, tempVal = null; - long rowCountVal = 0l; - InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment()) - .getRegion().getScanner(scan); - byte[] colFamily = scan.getFamilies()[0]; - byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); - List results = new ArrayList(); + public void getStd(RpcController controller, AggregateArgument request, + RpcCallback done) { + InternalScanner scanner = null; + AggregateResponse response = null; + try { + ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); + S sumVal = null, sumSqVal = null, tempVal = null; + long rowCountVal = 0l; + Scan scan = ProtobufUtil.toScan(request.getScan()); + scanner = env.getRegion().getScanner(scan); + byte[] colFamily = scan.getFamilies()[0]; + byte[] qualifier = scan.getFamilyMap().get(colFamily).pollFirst(); + List results = new ArrayList(); - boolean hasMoreRows = false; - try { + boolean hasMoreRows = false; + do { tempVal = null; hasMoreRows = scanner.next(results); @@ -226,32 +349,56 @@ sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal)); rowCountVal++; } while (hasMoreRows); + if (sumVal != null) { + ByteString first_sumVal = ci.getProtoForPromotedType(sumVal); + ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal); + AggregateResponse.Builder pair = AggregateResponse.newBuilder(); + pair.addFirstPart(first_sumVal); + pair.addFirstPart(first_sumSqVal); + ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal); + bb.rewind(); + pair.setSecondPart(ByteString.copyFrom(bb)); + response = pair.build(); + } + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } finally { - scanner.close(); + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) {} + } } - List l = new ArrayList(); - l.add(sumVal); - l.add(sumSqVal); - Pair, Long> p = new Pair, Long>(l, rowCountVal); - return p; + done.run(response); } + /** + * Gives a List containing sum of values and sum of weights. + * It is computed for the combination of column + * family and column qualifier(s) in the given row range as defined in the + * Scan object. In its current implementation, it takes one column family and + * two column qualifiers. The first qualifier is for values column and + * the second qualifier (optional) is for weight column. + */ @Override - public List getMedian(ColumnInterpreter ci, Scan scan) - throws IOException { - S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null; + public void getMedian(RpcController controller, AggregateArgument request, + RpcCallback done) { + AggregateResponse response = null; + InternalScanner scanner = null; + try { + ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); + S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null; + Scan scan = ProtobufUtil.toScan(request.getScan()); + scanner = env.getRegion().getScanner(scan); + byte[] colFamily = scan.getFamilies()[0]; + NavigableSet quals = scan.getFamilyMap().get(colFamily); + byte[] valQualifier = quals.pollFirst(); + // if weighted median is requested, get qualifier for the weight column + byte[] weightQualifier = quals.size() > 1 ? 
quals.pollLast() : null; + List<KeyValue> results = new ArrayList<KeyValue>(); - InternalScanner scanner = ((RegionCoprocessorEnvironment) getEnvironment()) - .getRegion().getScanner(scan); - byte[] colFamily = scan.getFamilies()[0]; - NavigableSet<byte[]> quals = scan.getFamilyMap().get(colFamily); - byte[] valQualifier = quals.pollFirst(); - // if weighted median is requested, get qualifier for the weight column - byte[] weightQualifier = quals.size() > 1 ? quals.pollLast() : null; - List<KeyValue> results = new ArrayList<KeyValue>(); - - boolean hasMoreRows = false; - try { + boolean hasMoreRows = false; + do { tempVal = null; tempWeight = null; @@ -268,13 +415,73 @@ sumVal = ci.add(sumVal, tempVal); sumWeights = ci.add(sumWeights, tempWeight); } while (hasMoreRows); + ByteString first_sumVal = ci.getProtoForPromotedType(sumVal); + S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights; + ByteString first_sumWeights = ci.getProtoForPromotedType(s); + AggregateResponse.Builder pair = AggregateResponse.newBuilder(); + pair.addFirstPart(first_sumVal); + pair.addFirstPart(first_sumWeights); + response = pair.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); } finally { - scanner.close(); + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) {} + } } - List<S> l = new ArrayList<S>(); - l.add(sumVal); - l.add(sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights); - return l; + done.run(response); } + + @SuppressWarnings("unchecked") + ColumnInterpreter<T, S> constructColumnInterpreterFromRequest( + AggregateArgument request) throws IOException { + String className = request.getInterpreterClassName(); + Class<?> cls; + try { + cls = Class.forName(className); + ColumnInterpreter<T, S> ci = (ColumnInterpreter<T, S>) cls.newInstance(); + if (request.hasInterpreterSpecificBytes()) { + ci.initialize(request.getInterpreterSpecificBytes()); + } + return ci; + } catch (ClassNotFoundException e) { + throw new IOException(e); + } catch (InstantiationException e) { + throw new IOException(e); + } catch (IllegalAccessException e) { + throw new IOException(e); + } + } + + @Override + public Service getService() { + return this; + } + + /** + * Stores a reference to the coprocessor environment provided by the + * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded + * on a table region, so always expects this to be an instance of + * {@link RegionCoprocessorEnvironment}. + * @param env the environment provided by the coprocessor host + * @throws IOException if the provided environment is not an instance of + * {@code RegionCoprocessorEnvironment} + */ + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (env instanceof RegionCoprocessorEnvironment) { + this.env = (RegionCoprocessorEnvironment)env; + } else { + throw new CoprocessorException("Must be loaded on a table region!"); + } + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + // nothing to do + } }
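
Usage note (not part of the patch): the sketch below shows how a client would exercise the new protobuf-based endpoint end to end, using the LongColumnInterpreter and AggregationClient touched above. It assumes the AggregationClient constructor accepts a Configuration and that AggregateImplementation has already been loaded on the target table (for example through the hbase.coprocessor.region.classes property or HTableDescriptor.addCoprocessor); the table, family and qualifier names below are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class AggregationClientUsageSketch {
  public static void main(String[] args) throws Throwable {
    // Assumption: AggregationClient is constructed from a plain Configuration.
    Configuration conf = HBaseConfiguration.create();
    AggregationClient aggregationClient = new AggregationClient(conf);

    // validateArgAndGetPB() keeps the old precondition: the Scan must name
    // exactly one column family; the qualifier is optional.
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q")); // hypothetical names

    byte[] tableName = Bytes.toBytes("demoTable"); // hypothetical table
    LongColumnInterpreter ci = new LongColumnInterpreter();

    // Each call builds an AggregateArgument (interpreter class name, Scan,
    // optional interpreter-specific bytes), invokes the AggregateService
    // endpoint on every region through coprocessorService, and merges the
    // per-region AggregateResponse payloads via the ColumnInterpreter.
    Long max = aggregationClient.max(tableName, ci, scan);
    Long sum = aggregationClient.sum(tableName, ci, scan);
    long rowCount = aggregationClient.rowCount(tableName, ci, scan);

    System.out.println("max=" + max + ", sum=" + sum + ", rows=" + rowCount);
  }
}

On the server side, constructColumnInterpreterFromRequest() re-creates the interpreter from the class name carried in the AggregateArgument via Class.forName() and newInstance(), so any custom ColumnInterpreter used this way needs a public no-argument constructor and must be on the region server classpath.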