Index: metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
===================================================================
--- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java (revision 1672517)
+++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java (working copy)
@@ -25821,6 +25821,3675 @@
// @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Table)
}
+ public interface PartitionKeyComparatorOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string names = 1;
+ /**
+ * required string names = 1;
+ */
+ boolean hasNames();
+ /**
+ * required string names = 1;
+ */
+ java.lang.String getNames();
+ /**
+ * required string names = 1;
+ */
+ com.google.protobuf.ByteString
+ getNamesBytes();
+
+ // required string types = 2;
+ /**
+ * required string types = 2;
+ */
+ boolean hasTypes();
+ /**
+ * required string types = 2;
+ */
+ java.lang.String getTypes();
+ /**
+ * required string types = 2;
+ */
+ com.google.protobuf.ByteString
+ getTypesBytes();
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator>
+ getOpList();
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index);
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ int getOpCount();
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder>
+ getOpOrBuilderList();
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder(
+ int index);
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range>
+ getRangeList();
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index);
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ int getRangeCount();
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder>
+ getRangeOrBuilderList();
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator}
+ */
+ public static final class PartitionKeyComparator extends
+ com.google.protobuf.GeneratedMessage
+ implements PartitionKeyComparatorOrBuilder {
+ // Use PartitionKeyComparator.newBuilder() to construct.
+ private PartitionKeyComparator(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private PartitionKeyComparator(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final PartitionKeyComparator defaultInstance;
+ public static PartitionKeyComparator getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public PartitionKeyComparator getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private PartitionKeyComparator(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ names_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ types_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ op_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ op_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.PARSER, extensionRegistry));
+ break;
+ }
+ case 34: {
+ if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ range_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range>();
+ mutable_bitField0_ |= 0x00000008;
+ }
+ range_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ op_ = java.util.Collections.unmodifiableList(op_);
+ }
+ if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ range_ = java.util.Collections.unmodifiableList(range_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<PartitionKeyComparator> PARSER =
+ new com.google.protobuf.AbstractParser<PartitionKeyComparator>() {
+ public PartitionKeyComparator parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new PartitionKeyComparator(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<PartitionKeyComparator> getParserForType() {
+ return PARSER;
+ }
+
+ public interface MarkOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string value = 1;
+ /**
+ * required string value = 1;
+ */
+ boolean hasValue();
+ /**
+ * required string value = 1;
+ */
+ java.lang.String getValue();
+ /**
+ * required string value = 1;
+ */
+ com.google.protobuf.ByteString
+ getValueBytes();
+
+ // required bool inclusive = 2;
+ /**
+ * required bool inclusive = 2;
+ */
+ boolean hasInclusive();
+ /**
+ * required bool inclusive = 2;
+ */
+ boolean getInclusive();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark}
+ */
+ public static final class Mark extends
+ com.google.protobuf.GeneratedMessage
+ implements MarkOrBuilder {
+ // Use Mark.newBuilder() to construct.
+ private Mark(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Mark(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Mark defaultInstance;
+ public static Mark getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Mark getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Mark(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ value_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ inclusive_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Mark> PARSER =
+ new com.google.protobuf.AbstractParser<Mark>() {
+ public Mark parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Mark(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Mark> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string value = 1;
+ public static final int VALUE_FIELD_NUMBER = 1;
+ private java.lang.Object value_;
+ /**
+ * required string value = 1;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string value = 1;
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ value_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string value = 1;
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required bool inclusive = 2;
+ public static final int INCLUSIVE_FIELD_NUMBER = 2;
+ private boolean inclusive_;
+ /**
+ * required bool inclusive = 2;
+ */
+ public boolean hasInclusive() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bool inclusive = 2;
+ */
+ public boolean getInclusive() {
+ return inclusive_;
+ }
+
+ private void initFields() {
+ value_ = "";
+ inclusive_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasValue()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasInclusive()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getValueBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBool(2, inclusive_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getValueBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(2, inclusive_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ value_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ inclusive_ = false;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark build() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark buildPartial() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.value_ = value_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.inclusive_ = inclusive_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark) {
+ return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark other) {
+ if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) return this;
+ if (other.hasValue()) {
+ bitField0_ |= 0x00000001;
+ value_ = other.value_;
+ onChanged();
+ }
+ if (other.hasInclusive()) {
+ setInclusive(other.getInclusive());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasValue()) {
+
+ return false;
+ }
+ if (!hasInclusive()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string value = 1;
+ private java.lang.Object value_ = "";
+ /**
+ * required string value = 1;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string value = 1;
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ value_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string value = 1;
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string value = 1;
+ */
+ public Builder setValue(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string value = 1;
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string value = 1;
+ */
+ public Builder setValueBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required bool inclusive = 2;
+ private boolean inclusive_ ;
+ /**
+ * required bool inclusive = 2;
+ */
+ public boolean hasInclusive() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bool inclusive = 2;
+ */
+ public boolean getInclusive() {
+ return inclusive_;
+ }
+ /**
+ * required bool inclusive = 2;
+ */
+ public Builder setInclusive(boolean value) {
+ bitField0_ |= 0x00000002;
+ inclusive_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool inclusive = 2;
+ */
+ public Builder clearInclusive() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ inclusive_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark)
+ }
+
+ static {
+ defaultInstance = new Mark(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark)
+ }
+
+ public interface RangeOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string key = 1;
+ /**
+ * required string key = 1;
+ */
+ boolean hasKey();
+ /**
+ * required string key = 1;
+ */
+ java.lang.String getKey();
+ /**
+ * required string key = 1;
+ */
+ com.google.protobuf.ByteString
+ getKeyBytes();
+
+ // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ boolean hasStart();
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart();
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder();
+
+ // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ boolean hasEnd();
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd();
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range}
+ */
+ public static final class Range extends
+ com.google.protobuf.GeneratedMessage
+ implements RangeOrBuilder {
+ // Use Range.newBuilder() to construct.
+ private Range(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Range(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Range defaultInstance;
+ public static Range getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Range getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Range(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ key_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = start_.toBuilder();
+ }
+ start_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(start_);
+ start_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = end_.toBuilder();
+ }
+ end_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(end_);
+ end_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Range> PARSER =
+ new com.google.protobuf.AbstractParser<Range>() {
+ public Range parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Range(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Range> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string key = 1;
+ public static final int KEY_FIELD_NUMBER = 1;
+ private java.lang.Object key_;
+ /**
+ * required string key = 1;
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string key = 1;
+ */
+ public java.lang.String getKey() {
+ java.lang.Object ref = key_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ key_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string key = 1;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
+ java.lang.Object ref = key_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ key_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ public static final int START_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark start_;
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public boolean hasStart() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart() {
+ return start_;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder() {
+ return start_;
+ }
+
+ // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ public static final int END_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark end_;
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public boolean hasEnd() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd() {
+ return end_;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder() {
+ return end_;
+ }
+
+ private void initFields() {
+ key_ = "";
+ start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasKey()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasStart()) {
+ if (!getStart().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ if (hasEnd()) {
+ if (!getEnd().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getKeyBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, start_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, end_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getKeyBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, start_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, end_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getStartFieldBuilder();
+ getEndFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ key_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (startBuilder_ == null) {
+ start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ } else {
+ startBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (endBuilder_ == null) {
+ end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ } else {
+ endBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range build() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range buildPartial() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.key_ = key_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (startBuilder_ == null) {
+ result.start_ = start_;
+ } else {
+ result.start_ = startBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (endBuilder_ == null) {
+ result.end_ = end_;
+ } else {
+ result.end_ = endBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range) {
+ return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range other) {
+ if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance()) return this;
+ if (other.hasKey()) {
+ bitField0_ |= 0x00000001;
+ key_ = other.key_;
+ onChanged();
+ }
+ if (other.hasStart()) {
+ mergeStart(other.getStart());
+ }
+ if (other.hasEnd()) {
+ mergeEnd(other.getEnd());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasKey()) {
+
+ return false;
+ }
+ if (hasStart()) {
+ if (!getStart().isInitialized()) {
+
+ return false;
+ }
+ }
+ if (hasEnd()) {
+ if (!getEnd().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string key = 1;
+ private java.lang.Object key_ = "";
+ /**
+ * required string key = 1;
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string key = 1;
+ */
+ public java.lang.String getKey() {
+ java.lang.Object ref = key_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ key_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string key = 1;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
+ java.lang.Object ref = key_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ key_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string key = 1;
+ */
+ public Builder setKey(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ key_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string key = 1;
+ */
+ public Builder clearKey() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ key_ = getDefaultInstance().getKey();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string key = 1;
+ */
+ public Builder setKeyBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ key_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> startBuilder_;
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public boolean hasStart() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart() {
+ if (startBuilder_ == null) {
+ return start_;
+ } else {
+ return startBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public Builder setStart(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) {
+ if (startBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ start_ = value;
+ onChanged();
+ } else {
+ startBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public Builder setStart(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder builderForValue) {
+ if (startBuilder_ == null) {
+ start_ = builderForValue.build();
+ onChanged();
+ } else {
+ startBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public Builder mergeStart(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) {
+ if (startBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ start_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) {
+ start_ =
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder(start_).mergeFrom(value).buildPartial();
+ } else {
+ start_ = value;
+ }
+ onChanged();
+ } else {
+ startBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public Builder clearStart() {
+ if (startBuilder_ == null) {
+ start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ onChanged();
+ } else {
+ startBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder getStartBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getStartFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder() {
+ if (startBuilder_ != null) {
+ return startBuilder_.getMessageOrBuilder();
+ } else {
+ return start_;
+ }
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>
+ getStartFieldBuilder() {
+ if (startBuilder_ == null) {
+ startBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>(
+ start_,
+ getParentForChildren(),
+ isClean());
+ start_ = null;
+ }
+ return startBuilder_;
+ }
+
+ // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> endBuilder_;
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public boolean hasEnd() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd() {
+ if (endBuilder_ == null) {
+ return end_;
+ } else {
+ return endBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public Builder setEnd(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) {
+ if (endBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ end_ = value;
+ onChanged();
+ } else {
+ endBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public Builder setEnd(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder builderForValue) {
+ if (endBuilder_ == null) {
+ end_ = builderForValue.build();
+ onChanged();
+ } else {
+ endBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public Builder mergeEnd(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) {
+ if (endBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ end_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) {
+ end_ =
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder(end_).mergeFrom(value).buildPartial();
+ } else {
+ end_ = value;
+ }
+ onChanged();
+ } else {
+ endBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public Builder clearEnd() {
+ if (endBuilder_ == null) {
+ end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance();
+ onChanged();
+ } else {
+ endBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder getEndBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getEndFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder() {
+ if (endBuilder_ != null) {
+ return endBuilder_.getMessageOrBuilder();
+ } else {
+ return end_;
+ }
+ }
+ /**
+ * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>
+ getEndFieldBuilder() {
+ if (endBuilder_ == null) {
+ endBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>(
+ end_,
+ getParentForChildren(),
+ isClean());
+ end_ = null;
+ }
+ return endBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range)
+ }
+
+ static {
+ defaultInstance = new Range(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range)
+ }
+
+ public interface OperatorOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ boolean hasType();
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType();
+
+ // required string key = 2;
+ /**
+ * required string key = 2;
+ */
+ boolean hasKey();
+ /**
+ * required string key = 2;
+ */
+ java.lang.String getKey();
+ /**
+ * required string key = 2;
+ */
+ com.google.protobuf.ByteString
+ getKeyBytes();
+
+ // required string val = 3;
+ /**
+ * required string val = 3;
+ */
+ boolean hasVal();
+ /**
+ * required string val = 3;
+ */
+ java.lang.String getVal();
+ /**
+ * required string val = 3;
+ */
+ com.google.protobuf.ByteString
+ getValBytes();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator}
+ */
+ public static final class Operator extends
+ com.google.protobuf.GeneratedMessage
+ implements OperatorOrBuilder {
+ // Use Operator.newBuilder() to construct.
+ private Operator(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Operator(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Operator defaultInstance;
+ public static Operator getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Operator getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Operator(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(1, rawValue);
+ } else {
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ }
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ key_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ val_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Operator> PARSER =
+ new com.google.protobuf.AbstractParser<Operator>() {
+ public Operator parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Operator(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Operator> getParserForType() {
+ return PARSER;
+ }
+
+ /**
+ * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type}
+ */
+ public enum Type
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * LIKE = 0;
+ */
+ LIKE(0, 0),
+ /**
+ * NOTEQUALS = 1;
+ */
+ NOTEQUALS(1, 1),
+ ;
+
+ /**
+ * LIKE = 0;
+ */
+ public static final int LIKE_VALUE = 0;
+ /**
+ * NOTEQUALS = 1;
+ */
+ public static final int NOTEQUALS_VALUE = 1;
+
+
+ public final int getNumber() { return value; }
+
+ public static Type valueOf(int value) {
+ switch (value) {
+ case 0: return LIKE;
+ case 1: return NOTEQUALS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<Type>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<Type>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<Type>() {
+ public Type findValueByNumber(int number) {
+ return Type.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final Type[] VALUES = values();
+
+ public static Type valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private Type(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type)
+ }
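+ // Illustrative note (added commentary, not protoc output): Type.valueOf(int) returns
+ // null for wire values other than 0 and 1, which is why the parsing constructor above
+ // stores unrecognized enum numbers in the message's UnknownFieldSet instead of failing.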
+
+ private int bitField0_;
+ // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ public static final int TYPE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type type_;
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType() {
+ return type_;
+ }
+
+ // required string key = 2;
+ public static final int KEY_FIELD_NUMBER = 2;
+ private java.lang.Object key_;
+ /**
+ * required string key = 2;
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string key = 2;
+ */
+ public java.lang.String getKey() {
+ java.lang.Object ref = key_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ key_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string key = 2;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
+ java.lang.Object ref = key_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ key_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string val = 3;
+ public static final int VAL_FIELD_NUMBER = 3;
+ private java.lang.Object val_;
+ /**
+ * required string val = 3;
+ */
+ public boolean hasVal() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string val = 3;
+ */
+ public java.lang.String getVal() {
+ java.lang.Object ref = val_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ val_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string val = 3;
+ */
+ public com.google.protobuf.ByteString
+ getValBytes() {
+ java.lang.Object ref = val_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ val_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE;
+ key_ = "";
+ val_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasKey()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasVal()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeEnum(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getKeyBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getValBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getKeyBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getValBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ key_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ val_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator build() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator buildPartial() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.type_ = type_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.key_ = key_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.val_ = val_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator) {
+ return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator other) {
+ if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance()) return this;
+ if (other.hasType()) {
+ setType(other.getType());
+ }
+ if (other.hasKey()) {
+ bitField0_ |= 0x00000002;
+ key_ = other.key_;
+ onChanged();
+ }
+ if (other.hasVal()) {
+ bitField0_ |= 0x00000004;
+ val_ = other.val_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasType()) {
+
+ return false;
+ }
+ if (!hasKey()) {
+
+ return false;
+ }
+ if (!hasVal()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE;
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType() {
+ return type_;
+ }
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ public Builder setType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE;
+ onChanged();
+ return this;
+ }
+
+ // required string key = 2;
+ private java.lang.Object key_ = "";
+ /**
+ * required string key = 2;
+ */
+ public boolean hasKey() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string key = 2;
+ */
+ public java.lang.String getKey() {
+ java.lang.Object ref = key_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ key_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string key = 2;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
+ java.lang.Object ref = key_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ key_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string key = 2;
+ */
+ public Builder setKey(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ key_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string key = 2;
+ */
+ public Builder clearKey() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ key_ = getDefaultInstance().getKey();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string key = 2;
+ */
+ public Builder setKeyBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ key_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string val = 3;
+ private java.lang.Object val_ = "";
+ /**
+ * required string val = 3;
+ */
+ public boolean hasVal() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string val = 3;
+ */
+ public java.lang.String getVal() {
+ java.lang.Object ref = val_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ val_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string val = 3;
+ */
+ public com.google.protobuf.ByteString
+ getValBytes() {
+ java.lang.Object ref = val_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ val_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string val = 3;
+ */
+ public Builder setVal(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ val_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string val = 3;
+ */
+ public Builder clearVal() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ val_ = getDefaultInstance().getVal();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string val = 3;
+ */
+ public Builder setValBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ val_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator)
+ }
+
+ static {
+ defaultInstance = new Operator(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator)
+ }
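+ // Usage sketch (added commentary, not protoc output): an Operator expresses a single
+ // predicate on one partition key; the column name and comparison value below are
+ // illustrative placeholders, not values taken from the generated code.
+ //
+ //   PartitionKeyComparator.Operator op = PartitionKeyComparator.Operator.newBuilder()
+ //       .setType(PartitionKeyComparator.Operator.Type.LIKE)
+ //       .setKey("ds")
+ //       .setVal("2015-04-%")
+ //       .build();
+ //
+ // All three fields are declared required, so build() throws an
+ // UninitializedMessageException if any of them is unset.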
+
+ private int bitField0_;
+ // required string names = 1;
+ public static final int NAMES_FIELD_NUMBER = 1;
+ private java.lang.Object names_;
+ /**
+ * required string names = 1;
+ */
+ public boolean hasNames() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string names = 1;
+ */
+ public java.lang.String getNames() {
+ java.lang.Object ref = names_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ names_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string names = 1;
+ */
+ public com.google.protobuf.ByteString
+ getNamesBytes() {
+ java.lang.Object ref = names_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ names_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string types = 2;
+ public static final int TYPES_FIELD_NUMBER = 2;
+ private java.lang.Object types_;
+ /**
+ * required string types = 2;
+ */
+ public boolean hasTypes() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string types = 2;
+ */
+ public java.lang.String getTypes() {
+ java.lang.Object ref = types_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ types_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string types = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTypesBytes() {
+ java.lang.Object ref = types_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ types_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ public static final int OP_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator> op_;
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator> getOpList() {
+ return op_;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder>
+ getOpOrBuilderList() {
+ return op_;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public int getOpCount() {
+ return op_.size();
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index) {
+ return op_.get(index);
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder(
+ int index) {
+ return op_.get(index);
+ }
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ public static final int RANGE_FIELD_NUMBER = 4;
+ private java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range> range_;
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range> getRangeList() {
+ return range_;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder>
+ getRangeOrBuilderList() {
+ return range_;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public int getRangeCount() {
+ return range_.size();
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index) {
+ return range_.get(index);
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder(
+ int index) {
+ return range_.get(index);
+ }
+
+ private void initFields() {
+ names_ = "";
+ types_ = "";
+ op_ = java.util.Collections.emptyList();
+ range_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNames()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTypes()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getOpCount(); i++) {
+ if (!getOp(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getRangeCount(); i++) {
+ if (!getRange(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
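+ // Illustrative note (added commentary, not protoc output): writeTo below emits the
+ // names and types strings as length-delimited fields 1 and 2, followed by each Operator
+ // as an embedded message at field 3 and each Range at field 4, in list order.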
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNamesBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getTypesBytes());
+ }
+ for (int i = 0; i < op_.size(); i++) {
+ output.writeMessage(3, op_.get(i));
+ }
+ for (int i = 0; i < range_.size(); i++) {
+ output.writeMessage(4, range_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNamesBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getTypesBytes());
+ }
+ for (int i = 0; i < op_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, op_.get(i));
+ }
+ for (int i = 0; i < range_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, range_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparatorOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getOpFieldBuilder();
+ getRangeFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ names_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ types_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (opBuilder_ == null) {
+ op_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ opBuilder_.clear();
+ }
+ if (rangeBuilder_ == null) {
+ range_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ rangeBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator build() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator buildPartial() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.names_ = names_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.types_ = types_;
+ if (opBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ op_ = java.util.Collections.unmodifiableList(op_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.op_ = op_;
+ } else {
+ result.op_ = opBuilder_.build();
+ }
+ if (rangeBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ range_ = java.util.Collections.unmodifiableList(range_);
+ bitField0_ = (bitField0_ & ~0x00000008);
+ }
+ result.range_ = range_;
+ } else {
+ result.range_ = rangeBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator) {
+ return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator other) {
+ if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.getDefaultInstance()) return this;
+ if (other.hasNames()) {
+ bitField0_ |= 0x00000001;
+ names_ = other.names_;
+ onChanged();
+ }
+ if (other.hasTypes()) {
+ bitField0_ |= 0x00000002;
+ types_ = other.types_;
+ onChanged();
+ }
+ if (opBuilder_ == null) {
+ if (!other.op_.isEmpty()) {
+ if (op_.isEmpty()) {
+ op_ = other.op_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureOpIsMutable();
+ op_.addAll(other.op_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.op_.isEmpty()) {
+ if (opBuilder_.isEmpty()) {
+ opBuilder_.dispose();
+ opBuilder_ = null;
+ op_ = other.op_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ opBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getOpFieldBuilder() : null;
+ } else {
+ opBuilder_.addAllMessages(other.op_);
+ }
+ }
+ }
+ if (rangeBuilder_ == null) {
+ if (!other.range_.isEmpty()) {
+ if (range_.isEmpty()) {
+ range_ = other.range_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ ensureRangeIsMutable();
+ range_.addAll(other.range_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.range_.isEmpty()) {
+ if (rangeBuilder_.isEmpty()) {
+ rangeBuilder_.dispose();
+ rangeBuilder_ = null;
+ range_ = other.range_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ rangeBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRangeFieldBuilder() : null;
+ } else {
+ rangeBuilder_.addAllMessages(other.range_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasNames()) {
+
+ return false;
+ }
+ if (!hasTypes()) {
+
+ return false;
+ }
+ for (int i = 0; i < getOpCount(); i++) {
+ if (!getOp(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getRangeCount(); i++) {
+ if (!getRange(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string names = 1;
+ private java.lang.Object names_ = "";
+ /**
+ * required string names = 1;
+ */
+ public boolean hasNames() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string names = 1;
+ */
+ public java.lang.String getNames() {
+ java.lang.Object ref = names_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ names_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string names = 1;
+ */
+ public com.google.protobuf.ByteString
+ getNamesBytes() {
+ java.lang.Object ref = names_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ names_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string names = 1;
+ */
+ public Builder setNames(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ names_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string names = 1;
+ */
+ public Builder clearNames() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ names_ = getDefaultInstance().getNames();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string names = 1;
+ */
+ public Builder setNamesBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ names_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string types = 2;
+ private java.lang.Object types_ = "";
+ /**
+ * required string types = 2;
+ */
+ public boolean hasTypes() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string types = 2;
+ */
+ public java.lang.String getTypes() {
+ java.lang.Object ref = types_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ types_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string types = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTypesBytes() {
+ java.lang.Object ref = types_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ types_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string types = 2;
+ */
+ public Builder setTypes(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ types_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string types = 2;
+ */
+ public Builder clearTypes() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ types_ = getDefaultInstance().getTypes();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string types = 2;
+ */
+ public Builder setTypesBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ types_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ private java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator> op_ =
+ java.util.Collections.emptyList();
+ private void ensureOpIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ op_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator>(op_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder> opBuilder_;
+
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator> getOpList() {
+ if (opBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(op_);
+ } else {
+ return opBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public int getOpCount() {
+ if (opBuilder_ == null) {
+ return op_.size();
+ } else {
+ return opBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index) {
+ if (opBuilder_ == null) {
+ return op_.get(index);
+ } else {
+ return opBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder setOp(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) {
+ if (opBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureOpIsMutable();
+ op_.set(index, value);
+ onChanged();
+ } else {
+ opBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder setOp(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) {
+ if (opBuilder_ == null) {
+ ensureOpIsMutable();
+ op_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ opBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder addOp(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) {
+ if (opBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureOpIsMutable();
+ op_.add(value);
+ onChanged();
+ } else {
+ opBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder addOp(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) {
+ if (opBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureOpIsMutable();
+ op_.add(index, value);
+ onChanged();
+ } else {
+ opBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder addOp(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) {
+ if (opBuilder_ == null) {
+ ensureOpIsMutable();
+ op_.add(builderForValue.build());
+ onChanged();
+ } else {
+ opBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder addOp(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) {
+ if (opBuilder_ == null) {
+ ensureOpIsMutable();
+ op_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ opBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder addAllOp(
+ java.lang.Iterable<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator> values) {
+ if (opBuilder_ == null) {
+ ensureOpIsMutable();
+ super.addAll(values, op_);
+ onChanged();
+ } else {
+ opBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder clearOp() {
+ if (opBuilder_ == null) {
+ op_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ opBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public Builder removeOp(int index) {
+ if (opBuilder_ == null) {
+ ensureOpIsMutable();
+ op_.remove(index);
+ onChanged();
+ } else {
+ opBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder getOpBuilder(
+ int index) {
+ return getOpFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder(
+ int index) {
+ if (opBuilder_ == null) {
+ return op_.get(index); } else {
+ return opBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder>
+ getOpOrBuilderList() {
+ if (opBuilder_ != null) {
+ return opBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(op_);
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder addOpBuilder() {
+ return getOpFieldBuilder().addBuilder(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance());
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder addOpBuilder(
+ int index) {
+ return getOpFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance());
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3;
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder>
+ getOpBuilderList() {
+ return getOpFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder>
+ getOpFieldBuilder() {
+ if (opBuilder_ == null) {
+ opBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder>(
+ op_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ op_ = null;
+ }
+ return opBuilder_;
+ }
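+ // Illustrative note (added commentary, not protoc output): the repeated 'op' field is
+ // held as a plain java.util.List until a builder view is first requested; at that point
+ // getOpFieldBuilder() hands the elements to a RepeatedFieldBuilder and nulls out op_.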
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ private java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range> range_ =
+ java.util.Collections.emptyList();
+ private void ensureRangeIsMutable() {
+ if (!((bitField0_ & 0x00000008) == 0x00000008)) {
+ range_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range>(range_);
+ bitField0_ |= 0x00000008;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder> rangeBuilder_;
+
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range> getRangeList() {
+ if (rangeBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(range_);
+ } else {
+ return rangeBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public int getRangeCount() {
+ if (rangeBuilder_ == null) {
+ return range_.size();
+ } else {
+ return rangeBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index) {
+ if (rangeBuilder_ == null) {
+ return range_.get(index);
+ } else {
+ return rangeBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder setRange(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) {
+ if (rangeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRangeIsMutable();
+ range_.set(index, value);
+ onChanged();
+ } else {
+ rangeBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder setRange(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) {
+ if (rangeBuilder_ == null) {
+ ensureRangeIsMutable();
+ range_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ rangeBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder addRange(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) {
+ if (rangeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRangeIsMutable();
+ range_.add(value);
+ onChanged();
+ } else {
+ rangeBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder addRange(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) {
+ if (rangeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRangeIsMutable();
+ range_.add(index, value);
+ onChanged();
+ } else {
+ rangeBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder addRange(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) {
+ if (rangeBuilder_ == null) {
+ ensureRangeIsMutable();
+ range_.add(builderForValue.build());
+ onChanged();
+ } else {
+ rangeBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder addRange(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) {
+ if (rangeBuilder_ == null) {
+ ensureRangeIsMutable();
+ range_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ rangeBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder addAllRange(
+ java.lang.Iterable<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range> values) {
+ if (rangeBuilder_ == null) {
+ ensureRangeIsMutable();
+ super.addAll(values, range_);
+ onChanged();
+ } else {
+ rangeBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder clearRange() {
+ if (rangeBuilder_ == null) {
+ range_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ onChanged();
+ } else {
+ rangeBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public Builder removeRange(int index) {
+ if (rangeBuilder_ == null) {
+ ensureRangeIsMutable();
+ range_.remove(index);
+ onChanged();
+ } else {
+ rangeBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder getRangeBuilder(
+ int index) {
+ return getRangeFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder(
+ int index) {
+ if (rangeBuilder_ == null) {
+ return range_.get(index); } else {
+ return rangeBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder>
+ getRangeOrBuilderList() {
+ if (rangeBuilder_ != null) {
+ return rangeBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(range_);
+ }
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder addRangeBuilder() {
+ return getRangeFieldBuilder().addBuilder(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance());
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder addRangeBuilder(
+ int index) {
+ return getRangeFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance());
+ }
+ /**
+ * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4;
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder>
+ getRangeBuilderList() {
+ return getRangeFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder>
+ getRangeFieldBuilder() {
+ if (rangeBuilder_ == null) {
+ rangeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder>(
+ range_,
+ ((bitField0_ & 0x00000008) == 0x00000008),
+ getParentForChildren(),
+ isClean());
+ range_ = null;
+ }
+ return rangeBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator)
+ }
+
+ static {
+ defaultInstance = new PartitionKeyComparator(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator)
+ }
+
private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor;
private static
@@ -25961,6 +29630,26 @@
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -26093,8 +29782,24 @@
"\n \001(\t\022\032\n\022view_expanded_text\030\013 \001(\t\022\022\n\ntab" +
"le_type\030\014 \001(\t\022Q\n\nprivileges\030\r \001(\0132=.org." +
"apache.hadoop.hive.metastore.hbase.Princ" +
- "ipalPrivilegeSet\022\024\n\014is_temporary\030\016 \001(\010*#" +
- "\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001"
+ "ipalPrivilegeSet\022\024\n\014is_temporary\030\016 \001(\010\"\353" +
+ "\004\n\026PartitionKeyComparator\022\r\n\005names\030\001 \002(\t" +
+ "\022\r\n\005types\030\002 \002(\t\022S\n\002op\030\003 \003(\0132G.org.apache" +
+ ".hadoop.hive.metastore.hbase.PartitionKe" +
+ "yComparator.Operator\022S\n\005range\030\004 \003(\0132D.or" +
+ "g.apache.hadoop.hive.metastore.hbase.Par" +
+ "titionKeyComparator.Range\032(\n\004Mark\022\r\n\005val",
+ "ue\030\001 \002(\t\022\021\n\tinclusive\030\002 \002(\010\032\272\001\n\005Range\022\013\n" +
+ "\003key\030\001 \002(\t\022R\n\005start\030\002 \001(\0132C.org.apache.h" +
+ "adoop.hive.metastore.hbase.PartitionKeyC" +
+ "omparator.Mark\022P\n\003end\030\003 \001(\0132C.org.apache" +
+ ".hadoop.hive.metastore.hbase.PartitionKe" +
+ "yComparator.Mark\032\241\001\n\010Operator\022Z\n\004type\030\001 " +
+ "\002(\0162L.org.apache.hadoop.hive.metastore.h" +
+ "base.PartitionKeyComparator.Operator.Typ" +
+ "e\022\013\n\003key\030\002 \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Type\022\010\n\004L" +
+ "IKE\020\000\022\r\n\tNOTEQUALS\020\001*#\n\rPrincipalType\022\010\n",
+ "\004USER\020\000\022\010\n\004ROLE\020\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -26269,6 +29974,30 @@
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor,
new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "TableType", "Privileges", "IsTemporary", });
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor =
+ getDescriptor().getMessageTypes().get(16);
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor,
+ new java.lang.String[] { "Names", "Types", "Op", "Range", });
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor =
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor,
+ new java.lang.String[] { "Value", "Inclusive", });
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor =
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(1);
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor,
+ new java.lang.String[] { "Key", "Start", "End", });
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor =
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(2);
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor,
+ new java.lang.String[] { "Type", "Key", "Val", });
return null;
}
};
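For orientation, a minimal sketch of driving the generated builders above by hand. It assumes the standard protobuf-java accessor names generated for the fields declared in hbase_metastore_proto.proto; the partition column names, types and predicate values are purely illustrative, not taken from the patch.

    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

    public class PartitionKeyComparatorProtoSketch {
      public static void main(String[] args) {
        // Roughly "ds >= '2015-01-01' AND hr <> '00'" over partition columns (ds string, hr string).
        HbaseMetastoreProto.PartitionKeyComparator proto =
            HbaseMetastoreProto.PartitionKeyComparator.newBuilder()
                .setNames("ds,hr")              // comma-separated partition key names
                .setTypes("string,string")      // comma-separated partition key types
                .addRange(HbaseMetastoreProto.PartitionKeyComparator.Range.newBuilder()
                    .setKey("ds")
                    .setStart(HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder()
                        .setValue("2015-01-01")
                        .setInclusive(true)))
                .addOp(HbaseMetastoreProto.PartitionKeyComparator.Operator.newBuilder()
                    .setType(HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.NOTEQUALS)
                    .setKey("hr")
                    .setVal("00"))
                .build();
        System.out.println(proto);
      }
    }

In practice the serialized form of this message is produced by PartitionKeyComparator.toByteArray() further down in this patch and parsed back on the region server by PartitionKeyComparator.parseFrom().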
Index: metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java (revision 1672517)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java (working copy)
@@ -20,15 +20,30 @@
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
@@ -147,7 +162,7 @@
public static class ScanPlan extends FilterPlan {
public static class ScanMarker {
- final byte[] bytes;
+ final String value;
/**
* If inclusive = true, it means that the
* marker includes those bytes.
@@ -155,20 +170,24 @@
* or ends at the next possible byte array
*/
final boolean isInclusive;
- ScanMarker(byte [] b, boolean i){
- this.bytes = b;
+ final String type;
+ ScanMarker(String obj, boolean i, String type){
+ this.value = obj;
this.isInclusive = i;
+ this.type = type;
}
@Override
public String toString() {
- return "ScanMarker [bytes=" + Arrays.toString(bytes) + ", isInclusive=" + isInclusive + "]";
+ return "ScanMarker [" + "value=" + value.toString() + ", isInclusive=" + isInclusive +
+ ", type=" + type + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
- result = prime * result + Arrays.hashCode(bytes);
+ result = prime * result + value.hashCode();
result = prime * result + (isInclusive ? 1231 : 1237);
+ result = prime * result + type.hashCode();
return result;
}
@Override
@@ -180,49 +199,111 @@
if (getClass() != obj.getClass())
return false;
ScanMarker other = (ScanMarker) obj;
- if (!Arrays.equals(bytes, other.bytes))
+ if (!value.equals(other.value))
return false;
if (isInclusive != other.isInclusive)
return false;
+ if (type != other.type)
+ return false;
return true;
}
}
+ public static class ScanMarkerPair {
+ public ScanMarkerPair(ScanMarker startMarker, ScanMarker endMarker) {
+ this.startMarker = startMarker;
+ this.endMarker = endMarker;
+ }
+ ScanMarker startMarker;
+ ScanMarker endMarker;
+ }
// represent Scan start
- private ScanMarker startMarker = new ScanMarker(null, false);
- // represent Scan end
- private ScanMarker endMarker = new ScanMarker(null, false);
+ Map<String, ScanMarkerPair> markers = new HashMap<String, ScanMarkerPair>();
+ List<Operator> ops = new ArrayList<Operator>();
- private ScanFilter filter;
-
- public ScanFilter getFilter() {
- return filter;
+ private int getMajorPartsCount(List<FieldSchema> parts) {
+ int majorPartsCount = 0;
+ while (majorPartsCount < parts.size()
+ && markers.containsKey(parts.get(majorPartsCount).getName())) {
+ ScanMarkerPair pair = markers.get(parts.get(majorPartsCount).getName());
+ majorPartsCount++;
+ if (pair.startMarker != null && pair.endMarker != null
+ && pair.startMarker.value.equals(pair.endMarker.value)
+ && pair.startMarker.isInclusive && pair.endMarker.isInclusive) {
+ // a point lookup on this key; the next key may still extend the major prefix
+ continue;
+ } else {
+ break;
+ }
+ }
+ return majorPartsCount;
+ }
+ public Filter getFilter(List<FieldSchema> parts) {
+ int majorPartsCount = getMajorPartsCount(parts);
+ Set<String> majorKeys = new HashSet<String>();
+ for (int i = 0; i < majorPartsCount; i++) {
+ majorKeys.add(parts.get(i).getName());
+ }
+ List<String> names = HBaseUtils.getPartitionNames(parts);
+ List<PartitionKeyComparator.Range> ranges = new ArrayList<PartitionKeyComparator.Range>();
+ for (Map.Entry<String, ScanMarkerPair> entry : markers.entrySet()) {
+ if (names.contains(entry.getKey()) && !majorKeys.contains(entry.getKey())) {
+ PartitionKeyComparator.Mark startMark = null;
+ if (entry.getValue().startMarker != null) {
+ startMark = new PartitionKeyComparator.Mark(entry.getValue().startMarker.value,
+ entry.getValue().startMarker.isInclusive);
+ }
+ PartitionKeyComparator.Mark endMark = null;
+ if (entry.getValue().endMarker != null) {
+ endMark = new PartitionKeyComparator.Mark(entry.getValue().endMarker.value,
+ entry.getValue().endMarker.isInclusive);
+ }
+ PartitionKeyComparator.Range range = new PartitionKeyComparator.Range(
+ entry.getKey(), startMark, endMark);
+ ranges.add(range);
+ }
+ }
+
+ if (ranges.isEmpty() && ops.isEmpty()) {
+ return null;
+ } else {
+ return new RowFilter(CompareFilter.CompareOp.EQUAL, new PartitionKeyComparator(
+ StringUtils.join(names, ","), StringUtils.join(HBaseUtils.getPartitionKeyTypes(parts), ","),
+ ranges, ops));
+ }
}
- public ScanMarker getStartMarker() {
- return startMarker;
+ public void setStartMarker(String keyName, String keyType, String start, boolean isInclusive) {
+ if (markers.containsKey(keyName)) {
+ markers.get(keyName).startMarker = new ScanMarker(start, isInclusive, keyType);
+ } else {
+ ScanMarkerPair marker = new ScanMarkerPair(new ScanMarker(start, isInclusive, keyType), null);
+ markers.put(keyName, marker);
+ }
}
- public void setStartMarker(ScanMarker startMarker) {
- this.startMarker = startMarker;
+ public ScanMarker getStartMarker(String keyName) {
+ if (markers.containsKey(keyName)) {
+ return markers.get(keyName).startMarker;
+ } else {
+ return null;
+ }
}
- public void setStartMarker(byte[] start, boolean isInclusive) {
- setStartMarker(new ScanMarker(start, isInclusive));
- }
- public ScanMarker getEndMarker() {
- return endMarker;
+ public void setEndMarker(String keyName, String keyType, String end, boolean isInclusive) {
+ if (markers.containsKey(keyName)) {
+ markers.get(keyName).endMarker = new ScanMarker(end, isInclusive, keyType);
+ } else {
+ ScanMarkerPair marker = new ScanMarkerPair(null, new ScanMarker(end, isInclusive, keyType));
+ markers.put(keyName, marker);
+ }
}
- public void setEndMarker(ScanMarker endMarker) {
- this.endMarker = endMarker;
+ public ScanMarker getEndMarker(String keyName) {
+ if (markers.containsKey(keyName)) {
+ return markers.get(keyName).endMarker;
+ } else {
+ return null;
+ }
}
- public void setEndMarker(byte[] end, boolean isInclusive) {
- setEndMarker(new ScanMarker(end, isInclusive));
- }
@Override
public FilterPlan and(FilterPlan other) {
@@ -236,28 +317,33 @@
private ScanPlan and(ScanPlan other) {
// create combined FilterPlan based on existing lhs and rhs plan
ScanPlan newPlan = new ScanPlan();
+ newPlan.markers.putAll(markers);
- // create new scan start
- ScanMarker greaterStartMarker = getComparedMarker(this.getStartMarker(),
- other.getStartMarker(), true);
- newPlan.setStartMarker(greaterStartMarker);
+ for (String keyName : other.markers.keySet()) {
+ if (newPlan.markers.containsKey(keyName)) {
+ // create new scan start
+ ScanMarker greaterStartMarker = getComparedMarker(this.getStartMarker(keyName),
+ other.getStartMarker(keyName), true);
+ if (greaterStartMarker != null) {
+ newPlan.setStartMarker(keyName, greaterStartMarker.type, greaterStartMarker.value, greaterStartMarker.isInclusive);
+ }
- // create new scan end
- ScanMarker lesserEndMarker = getComparedMarker(this.getEndMarker(), other.getEndMarker(),
- false);
- newPlan.setEndMarker(lesserEndMarker);
+ // create new scan end
+ ScanMarker lesserEndMarker = getComparedMarker(this.getEndMarker(keyName), other.getEndMarker(keyName),
+ false);
+ if (lesserEndMarker != null) {
+ newPlan.setEndMarker(keyName, lesserEndMarker.type, lesserEndMarker.value, lesserEndMarker.isInclusive);
+ }
+ } else {
+ newPlan.markers.put(keyName, other.markers.get(keyName));
+ }
+ }
- // create new filter plan
- newPlan.setFilter(createCombinedFilter(this.getFilter(), other.getFilter()));
-
+ newPlan.ops.addAll(ops);
+ newPlan.ops.addAll(other.ops);
return newPlan;
}
- private ScanFilter createCombinedFilter(ScanFilter filter1, ScanFilter filter2) {
- // TODO create combined filter - filter1 && filter2
- return null;
- }
-
/**
* @param lStartMarker
* @param rStartMarker
@@ -268,13 +354,23 @@
static ScanMarker getComparedMarker(ScanMarker lStartMarker, ScanMarker rStartMarker,
boolean getGreater) {
// if one of them has null bytes, just return other
- if(lStartMarker.bytes == null) {
+ if(lStartMarker == null) {
return rStartMarker;
- } else if (rStartMarker.bytes == null) {
+ } else if (rStartMarker == null) {
return lStartMarker;
}
+ TypeInfo expectedType =
+ TypeInfoUtils.getTypeInfoFromTypeString(lStartMarker.type);
+ ObjectInspector outputOI =
+ TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
+ Converter lConverter = ObjectInspectorConverters.getConverter(
+ PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
+ Converter rConverter = ObjectInspectorConverters.getConverter(
+ PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
+ Comparable lValue = (Comparable)lConverter.convert(lStartMarker.value);
+ Comparable rValue = (Comparable)rConverter.convert(rStartMarker.value);
- int compareRes = compare(lStartMarker.bytes, rStartMarker.bytes);
+ int compareRes = lValue.compareTo(rValue);
if (compareRes == 0) {
// bytes are equal, now compare the isInclusive flags
if (lStartMarker.isInclusive == rStartMarker.isInclusive) {
@@ -287,7 +383,7 @@
isInclusive = false;
}
// else
- return new ScanMarker(lStartMarker.bytes, isInclusive);
+ return new ScanMarker(lStartMarker.value, isInclusive, lStartMarker.type);
}
if (getGreater) {
return compareRes == 1 ? lStartMarker : rStartMarker;
@@ -313,42 +409,74 @@
/**
* @return row suffix - This is appended to db + table, to generate start row for the Scan
*/
- public byte[] getStartRowSuffix() {
- if (startMarker.isInclusive) {
- return startMarker.bytes;
- } else {
- return HBaseUtils.getEndPrefix(startMarker.bytes);
+ public byte[] getStartRowSuffix(String dbName, String tableName, List<FieldSchema> parts) {
+ int majorPartsCount = getMajorPartsCount(parts);
+ List<String> majorPartTypes = new ArrayList<String>();
+ List<String> components = new ArrayList<String>();
+ boolean endPrefix = false;
+ for (int i = 0; i < majorPartsCount; i++) {
+ majorPartTypes.add(parts.get(i).getType());
+ ScanMarker marker = markers.get(parts.get(i).getName()).startMarker;
+ if (marker != null) {
+ components.add(marker.value);
+ if (i == majorPartsCount - 1) {
+ endPrefix = !marker.isInclusive;
+ }
+ } else {
+ components.add(null);
+ }
+ }
+ return HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, endPrefix);
+ }
+ /**
+ * @return row suffix - This is appended to db + table, to generate end row for the Scan
+ */
+ public byte[] getEndRowSuffix(String dbName, String tableName, List<FieldSchema> parts) {
+ int majorPartsCount = getMajorPartsCount(parts);
+ List<String> majorPartTypes = new ArrayList<String>();
+ List<String> components = new ArrayList<String>();
+ boolean endPrefix = false;
+ for (int i = 0; i < majorPartsCount; i++) {
+ majorPartTypes.add(parts.get(i).getType());
+ ScanMarker marker = markers.get(parts.get(i).getName()).endMarker;
+ if (marker != null) {
+ components.add(marker.value);
+ if (i == majorPartsCount - 1) {
+ endPrefix = marker.isInclusive;
+ }
+ } else {
+ components.add(null);
+ if (i == majorPartsCount - 1) {
+ endPrefix = true;
+ }
+ }
+ }
+ return HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, endPrefix);
+ }
+ @Override
+ public String toString() {
+ StringBuffer sb = new StringBuffer();
+ sb.append("ScanPlan:\n");
+ for (Map.Entry<String, ScanMarkerPair> entry : markers.entrySet()) {
+ sb.append("key=" + entry.getKey() + "[startMarker=" + entry.getValue().startMarker
+ + ", endMarker=" + entry.getValue().endMarker + "]");
+ }
+ return sb.toString();
}
}
/**
- * represent a plan that can be used to create a hbase filter and then set in
- * Scan.setFilter()
- */
- public static class ScanFilter {
- // TODO: implement this
- }
-
- /**
* Visitor for ExpressionTree.
* It first generates the ScanPlan for the leaf nodes. The higher level nodes are
* either AND or OR operations. It then calls FilterPlan.and and FilterPlan.or with
@@ -369,9 +497,12 @@
// temporary params for current left and right side plans, for AND, OR
private FilterPlan rPlan;
- private final String firstPartcolumn;
- public PartitionFilterGenerator(String firstPartitionColumn) {
- this.firstPartcolumn = firstPartitionColumn;
+ private Map<String, String> nameToType = new HashMap<String, String>();
+
+ public PartitionFilterGenerator(List<FieldSchema> parts) {
+ for (FieldSchema part : parts) {
+ nameToType.put(part.getName(), part.getType());
+ }
}
FilterPlan getPlan() {
@@ -414,63 +545,37 @@
public void visit(LeafNode node) throws MetaException {
ScanPlan leafPlan = new ScanPlan();
curPlan = leafPlan;
- if (!isFirstParitionColumn(node.keyName)) {
- leafPlan.setFilter(generateScanFilter(node));
- return;
- }
- if (!(node.value instanceof String)) {
- // only string type is supported currently
- // treat conditions on other types as true
- return;
- }
// this is a condition on first partition column, so might influence the
// start and end of the scan
final boolean INCLUSIVE = true;
switch (node.operator) {
case EQUALS:
- leafPlan.setStartMarker(toBytes(node.value), INCLUSIVE);
- leafPlan.setEndMarker(toBytes(node.value), INCLUSIVE);
+ leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE);
+ leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE);
break;
case GREATERTHAN:
- leafPlan.setStartMarker(toBytes(node.value), !INCLUSIVE);
+ leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), !INCLUSIVE);
break;
case GREATERTHANOREQUALTO:
- leafPlan.setStartMarker(toBytes(node.value), INCLUSIVE);
+ leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE);
break;
case LESSTHAN:
- leafPlan.setEndMarker(toBytes(node.value), !INCLUSIVE);
+ leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), !INCLUSIVE);
break;
case LESSTHANOREQUALTO:
- leafPlan.setEndMarker(toBytes(node.value), INCLUSIVE);
+ leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE);
break;
case LIKE:
+ leafPlan.ops.add(new Operator(Operator.Type.LIKE, node.keyName, node.value.toString()));
+ break;
case NOTEQUALS:
case NOTEQUALS2:
- // TODO: create filter plan for these
- hasUnsupportedCondition = true;
+ leafPlan.ops.add(new Operator(Operator.Type.NOTEQUALS, node.keyName, node.value.toString()));
break;
}
}
- @VisibleForTesting
- static byte[] toBytes(Object value) {
- // TODO: actually implement this
- // We need to determine the actual type and use appropriate
- // serialization format for that type
- return ((String) value).getBytes(HBaseUtils.ENCODING);
- }
-
- private ScanFilter generateScanFilter(LeafNode node) {
- // TODO Auto-generated method stub
- hasUnsupportedCondition = true;
- return null;
- }
-
- private boolean isFirstParitionColumn(String keyName) {
- return keyName.equalsIgnoreCase(firstPartcolumn);
- }
-
private boolean hasUnsupportedCondition() {
return hasUnsupportedCondition;
}
@@ -486,12 +591,12 @@
}
}
- public static PlanResult getFilterPlan(ExpressionTree exprTree, String firstPartitionColumn) throws MetaException {
+ public static PlanResult getFilterPlan(ExpressionTree exprTree, List<FieldSchema> parts) throws MetaException {
if (exprTree == null) {
// TODO: if exprTree is null, we should do what ObjectStore does. See HIVE-10102
return new PlanResult(new ScanPlan(), true);
}
- PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(firstPartitionColumn);
+ PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(parts);
exprTree.accept(pGenerator);
return new PlanResult(pGenerator.getPlan(), pGenerator.hasUnsupportedCondition());
}
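The per-key marker bookkeeping above reduces to a small piece of interval arithmetic: for each partition column, AND keeps the tighter of the two start markers and the tighter of the two end markers, treating a missing marker as unbounded. A standalone sketch of that rule, under simplified assumptions (plain string comparison, no type conversion, hypothetical class and method names rather than the patch's own):

    public class MarkerIntersectionSketch {
      static final class Marker {
        final String value; final boolean inclusive;
        Marker(String value, boolean inclusive) { this.value = value; this.inclusive = inclusive; }
        public String toString() { return value + (inclusive ? " (incl)" : " (excl)"); }
      }

      // Pick whichever marker constrains the scan more tightly.
      // start == true  -> keep the greater lower bound
      // start == false -> keep the lesser upper bound
      static Marker tighter(Marker l, Marker r, boolean start) {
        if (l == null) return r;          // null means "unbounded on this side"
        if (r == null) return l;
        int cmp = l.value.compareTo(r.value);
        if (cmp == 0) {
          // equal values: exclusive is tighter than inclusive
          return new Marker(l.value, l.inclusive && r.inclusive);
        }
        return (cmp > 0) == start ? l : r;
      }

      public static void main(String[] args) {
        Marker s = tighter(new Marker("2015-01-01", false), new Marker("2015-01-01", true), true);
        Marker e = tighter(new Marker("2015-06-30", true), new Marker("2015-03-31", true), false);
        System.out.println("start=" + s + ", end=" + e); // start=2015-01-01 (excl), end=2015-03-31 (incl)
      }
    }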
Index: metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java (revision 1672517)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java (working copy)
@@ -19,9 +19,11 @@
package org.apache.hadoop.hive.metastore.hbase;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -44,6 +46,7 @@
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
@@ -53,6 +56,7 @@
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.hbase.AggregateStatsCache.AggrColStatsCached;
+import org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator;
import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregator;
import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregatorFactory;
import org.apache.hadoop.hive.metastore.hbase.utils.BloomFilter;
@@ -485,12 +489,12 @@
* @return a list of partition objects.
* @throws IOException
*/
- List<Partition> getPartitions(String dbName, String tableName, List<List<String>> partValLists)
- throws IOException {
+ List<Partition> getPartitions(String dbName, String tableName, List<String> partTypes,
+ List<List<String>> partValLists) throws IOException {
List<Partition> parts = new ArrayList<Partition>(partValLists.size());
List<Get> gets = new ArrayList<Get>(partValLists.size());
for (List<String> partVals : partValLists) {
- byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partVals);
+ byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals);
Get get = new Get(key);
get.addColumn(CATALOG_CF, CATALOG_COL);
gets.add(get);
@@ -518,7 +522,8 @@
*/
void putPartition(Partition partition) throws IOException {
byte[] hash = putStorageDescriptor(partition.getSd());
- byte[][] serialized = HBaseUtils.serializePartition(partition, hash);
+ byte[][] serialized = HBaseUtils.serializePartition(partition,
+ HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()), hash);
store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]);
partCache.put(partition.getDbName(), partition.getTableName(), partition);
}
@@ -539,7 +544,8 @@
decrementStorageDescriptorRefCount(oldPart.getSd());
hash = putStorageDescriptor(newPart.getSd());
}
- byte[][] serialized = HBaseUtils.serializePartition(newPart, hash);
+ byte[][] serialized = HBaseUtils.serializePartition(newPart,
+ HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash);
store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]);
partCache.put(newPart.getDbName(), newPart.getTableName(), newPart);
}
@@ -554,7 +560,9 @@
List<Put> puts = new ArrayList<Put>(partitions.size());
for (Partition partition : partitions) {
byte[] hash = putStorageDescriptor(partition.getSd());
- byte[][] serialized = HBaseUtils.serializePartition(partition, hash);
+ List partTypes = HBaseUtils.getPartitionKeyTypes(
+ getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys());
+ byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash);
Put p = new Put(serialized[0]);
p.add(CATALOG_CF, CATALOG_COL, serialized[1]);
puts.add(p);
@@ -580,7 +588,9 @@
decrementStorageDescriptorRefCount(oldParts.get(i).getSd());
hash = putStorageDescriptor(newParts.get(i).getSd());
}
- byte[][] serialized = HBaseUtils.serializePartition(newParts.get(i), hash);
+ Partition newPart = newParts.get(i);
+ byte[][] serialized = HBaseUtils.serializePartition(newPart,
+ HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash);
Put p = new Put(serialized[0]);
p.add(CATALOG_CF, CATALOG_COL, serialized[1]);
puts.add(p);
@@ -608,8 +618,9 @@
? new ArrayList<Partition>(cached).subList(0, maxPartitions)
: new ArrayList<Partition>(cached);
}
- byte[] keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName);
- List<Partition> parts = scanPartitionsWithFilter(keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), -1, null);
+ byte[] keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, new ArrayList<String>(),
+ new ArrayList<String>(), false);
+ List<Partition> parts = scanPartitionsWithFilter(dbName, tableName, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), -1, null);
partCache.put(dbName, tableName, parts, true);
return maxPartitions < parts.size() ? parts.subList(0, maxPartitions) : parts;
}
@@ -656,72 +667,66 @@
if (table == null) {
throw new NoSuchObjectException("Unable to find table " + dbName + "." + tableName);
}
- if (partVals.size() == table.getPartitionKeys().size()) {
- keyPrefix = HBaseUtils.buildKey(keyElements.toArray(new String[keyElements.size()]));
- } else {
- keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(keyElements.toArray(
- new String[keyElements.size()]));
- }
+ keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName,
+ HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys().subList(0, keyElements.size())),
+ keyElements);
// Now, build a filter out of the remaining keys
- String regex = null;
+ List<PartitionKeyComparator.Range> ranges = new ArrayList<PartitionKeyComparator.Range>();
+ List<PartitionKeyComparator.Operator> ops = new ArrayList<PartitionKeyComparator.Operator>();
if (!(partVals.size() == table.getPartitionKeys().size() && firstStar == -1)) {
- StringBuilder buf = new StringBuilder(".*");
for (int i = Math.max(0, firstStar);
i < table.getPartitionKeys().size() && i < partVals.size(); i++) {
- buf.append(HBaseUtils.KEY_SEPARATOR);
if ("*".equals(partVals.get(i))) {
- buf.append("[^");
- buf.append(HBaseUtils.KEY_SEPARATOR);
- buf.append("]+");
+ PartitionKeyComparator.Range range = new PartitionKeyComparator.Range(
+ table.getPartitionKeys().get(i).getName(),
+ new PartitionKeyComparator.Mark(partVals.get(i), true),
+ new PartitionKeyComparator.Mark(partVals.get(i), true));
+ ranges.add(range);
} else {
- buf.append(partVals.get(i));
+ PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator(
+ PartitionKeyComparator.Operator.Type.LIKE,
+ table.getPartitionKeys().get(i).getName(),
+ ".*");
+ ops.add(op);
}
}
- if (partVals.size() < table.getPartitionKeys().size()) {
- buf.append(HBaseUtils.KEY_SEPARATOR);
- buf.append(".*");
- }
- regex = buf.toString();
}
Filter filter = null;
- if (regex != null) {
- filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
+ if (!ranges.isEmpty() || !ops.isEmpty()) {
+ filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new PartitionKeyComparator(
+ StringUtils.join(HBaseUtils.getPartitionNames(table.getPartitionKeys()), ","),
+ StringUtils.join(HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()), ","),
+ ranges, ops));
}
if (LOG.isDebugEnabled()) {
LOG.debug("Scanning partitions with prefix <" + new String(keyPrefix) + "> and filter <" +
- regex + ">");
+ filter + ">");
}
- List<Partition> parts = scanPartitionsWithFilter(keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), maxPartitions, filter);
+ List<Partition> parts = scanPartitionsWithFilter(dbName, tableName, keyPrefix,
+ HBaseUtils.getEndPrefix(keyPrefix), maxPartitions, filter);
partCache.put(dbName, tableName, parts, false);
return parts;
}
List<Partition> scanPartitions(String dbName, String tableName, byte[] keyStart, byte[] keyEnd,
Filter filter, int maxPartitions) throws IOException, NoSuchObjectException {
- List<String> keyElements = new ArrayList<String>();
- keyElements.add(dbName);
- keyElements.add(tableName);
-
- byte[] keyPrefix =
- HBaseUtils.buildKeyWithTrailingSeparator(keyElements.toArray(new String[keyElements.size()]));
- byte[] startRow = ArrayUtils.addAll(keyPrefix, keyStart);
+ byte[] startRow = keyStart;
byte[] endRow;
if (keyEnd == null || keyEnd.length == 0) {
// stop when current db+table entries are over
- endRow = HBaseUtils.getEndPrefix(keyPrefix);
+ endRow = HBaseUtils.getEndPrefix(startRow);
} else {
- endRow = ArrayUtils.addAll(keyPrefix, keyEnd);
+ endRow = keyEnd;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Scanning partitions with start row <" + new String(startRow) + "> and end row <"
+ new String(endRow) + ">");
}
- return scanPartitionsWithFilter(startRow, endRow, maxPartitions, filter);
+ return scanPartitionsWithFilter(dbName, tableName, startRow, endRow, maxPartitions, filter);
}
@@ -739,7 +744,8 @@
partCache.remove(dbName, tableName, partVals);
Partition p = getPartition(dbName, tableName, partVals, false);
decrementStorageDescriptorRefCount(p.getSd());
- byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partVals);
+ byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName,
+ HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals);
delete(PART_TABLE, key, null, null);
}
@@ -747,7 +753,8 @@
boolean populateCache) throws IOException {
Partition cached = partCache.get(dbName, tableName, partVals);
if (cached != null) return cached;
- byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partVals);
+ byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName,
+ HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals);
byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL);
if (serialized == null) return null;
HBaseUtils.StorageDescriptorParts sdParts =
@@ -758,17 +765,18 @@
return sdParts.containingPartition;
}
- private List<Partition> scanPartitionsWithFilter(byte[] startRow, byte [] endRow,
- int maxResults, Filter filter)
+ private List<Partition> scanPartitionsWithFilter(String dbName, String tableName,
+ byte[] startRow, byte [] endRow, int maxResults, Filter filter)
throws IOException {
Iterator<Result> iter =
scan(PART_TABLE, startRow, endRow, CATALOG_CF, CATALOG_COL, filter);
+ List<FieldSchema> tablePartitions = getTable(dbName, tableName).getPartitionKeys();
List<Partition> parts = new ArrayList<Partition>();
int numToFetch = maxResults < 0 ? Integer.MAX_VALUE : maxResults;
for (int i = 0; i < numToFetch && iter.hasNext(); i++) {
Result result = iter.next();
- HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(result.getRow(),
- result.getValue(CATALOG_CF, CATALOG_COL));
+ HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(dbName, tableName,
+ tablePartitions, result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL));
StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash);
HBaseUtils.assembleStorageDescriptor(sd, sdParts);
parts.add(sdParts.containingPartition);
@@ -1538,7 +1546,9 @@
for (int i = 0; i < colKeys.length; i++) {
colKeys[i] = HBaseUtils.buildKey(colNames.get(i));
}
- byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, partVals.get(pOff));
+ byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName,
+ HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()),
+ partVals.get(pOff));
Get get = new Get(partKey);
for (byte[] colName : colKeys) {
get.addColumn(STATS_CF, colName);
@@ -1634,7 +1644,9 @@
byte[] colKey = HBaseUtils.buildKey(colName);
// Build a list of Gets, one per partition
for (int pOff = 0; pOff < partNames.size(); pOff++) {
- byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, partVals.get(pOff));
+ byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName,
+ HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()),
+ partVals.get(pOff));
Get get = new Get(partKey);
get.addColumn(STATS_CF, colKey);
gets.add(get);
@@ -1678,9 +1690,11 @@
return colStatsAggr;
}
- private byte[] getStatisticsKey(String dbName, String tableName, List<String> partVals) {
+ private byte[] getStatisticsKey(String dbName, String tableName, List<String> partVals) throws IOException {
return partVals == null ? HBaseUtils.buildKey(dbName, tableName) : HBaseUtils
- .buildPartitionKey(dbName, tableName, partVals);
+ .buildPartitionKey(dbName, tableName,
+ HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()),
+ partVals);
}
private String getStatisticsTable(List<String> partVals) {
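Most of the changes in this file thread the table's partition key types into key construction; the helper that supplies them, HBaseUtils.getPartitionKeyTypes() (added later in this patch), is a plain Guava projection over the table's FieldSchema list. A self-contained sketch of the same idea, with an illustrative class name and sample columns:

    import java.util.Arrays;
    import java.util.List;
    import com.google.common.base.Function;
    import com.google.common.collect.Lists;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    public class PartitionKeyTypesSketch {
      // Same shape as the new HBaseUtils.getPartitionKeyTypes(): FieldSchema -> type name.
      static List<String> typesOf(List<FieldSchema> parts) {
        return Lists.transform(parts, new Function<FieldSchema, String>() {
          public String apply(FieldSchema fs) {
            return fs.getType();
          }
        });
      }

      public static void main(String[] args) {
        List<FieldSchema> parts = Arrays.asList(
            new FieldSchema("ds", "string", null),
            new FieldSchema("hr", "int", null));
        System.out.println(typesOf(parts)); // prints [string, int]
      }
    }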
Index: metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java (revision 1672517)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java (working copy)
@@ -518,7 +518,8 @@
boolean commit = false;
openTransaction();
try {
- List<Partition> oldParts = getHBase().getPartitions(db_name, tbl_name, part_vals_list);
+ List<Partition> oldParts = getHBase().getPartitions(db_name, tbl_name,
+ HBaseUtils.getPartitionKeyTypes(getTable(db_name, tbl_name).getPartitionKeys()), part_vals_list);
getHBase().replacePartitions(oldParts, new_parts);
commit = true;
} catch (IOException e) {
@@ -607,10 +608,8 @@
if (table == null) {
throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName);
}
- String firstPartitionColumn = table.getPartitionKeys().get(0).getName();
// general hbase filter plan from expression tree
- PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, firstPartitionColumn);
-
+ PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys());
if (LOG.isDebugEnabled()) {
LOG.debug("Hbase Filter Plan generated : " + planRes.plan);
}
@@ -621,7 +620,9 @@
for (ScanPlan splan : planRes.plan.getPlans()) {
try {
List<Partition> parts = getHBase().scanPartitions(dbName, tblName,
- splan.getStartRowSuffix(), splan.getEndRowSuffix(), null, -1);
+ splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys()),
+ splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys()),
+ splan.getFilter(table.getPartitionKeys()), -1);
boolean reachedMax = false;
for (Partition part : parts) {
mergedParts.put(part.getValues(), part);
Index: metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java (revision 1672517)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java (working copy)
@@ -18,11 +18,14 @@
*/
package org.apache.hadoop.hive.metastore.hbase;
+import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
+
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
@@ -50,6 +53,19 @@
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDeWithEndPrefix;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.BytesWritable;
import org.apache.thrift.TFieldIdEnum;
import java.io.IOException;
@@ -63,6 +79,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Properties;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
@@ -713,15 +730,31 @@
return sd;
}
+ static List<String> getPartitionKeyTypes(List<FieldSchema> parts) {
+ com.google.common.base.Function<FieldSchema, String> fieldSchemaToType =
+ new com.google.common.base.Function<FieldSchema, String>() {
+ public String apply(FieldSchema fs) { return fs.getType(); }
+ };
+ return Lists.transform(parts, fieldSchemaToType);
+ }
+
+ static List<String> getPartitionNames(List<FieldSchema> parts) {
+ com.google.common.base.Function<FieldSchema, String> fieldSchemaToName =
+ new com.google.common.base.Function<FieldSchema, String>() {
+ public String apply(FieldSchema fs) { return fs.getName(); }
+ };
+ return Lists.transform(parts, fieldSchemaToName);
+ }
+
/**
* Serialize a partition
* @param part partition object
* @param sdHash hash that is being used as a key for the enclosed storage descriptor
* @return First element is the key, second is the serialized partition
*/
- static byte[][] serializePartition(Partition part, byte[] sdHash) {
+ static byte[][] serializePartition(Partition part, List<String> partTypes, byte[] sdHash) {
byte[][] result = new byte[2][];
- result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), part.getValues());
+ result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), partTypes, part.getValues());
HbaseMetastoreProto.Partition.Builder builder = HbaseMetastoreProto.Partition.newBuilder();
builder
.setCreateTime(part.getCreateTime())
@@ -736,13 +769,56 @@
return result;
}
- static byte[] buildPartitionKey(String dbName, String tableName, List<String> partVals) {
- Deque<String> keyParts = new ArrayDeque<String>(partVals);
- keyParts.addFirst(tableName);
- keyParts.addFirst(dbName);
- return buildKey(keyParts.toArray(new String[keyParts.size()]));
+ static byte[] buildPartitionKey(String dbName, String tableName, List<String> partTypes, List<String> partVals) {
+ return buildPartitionKey(dbName, tableName, partTypes, partVals, false);
}
+ static byte[] buildPartitionKey(String dbName, String tableName, List<String> partTypes, List<String> partVals, boolean endPrefix) {
+ Object[] components = new Object[partVals.size()];
+ for (int i = 0; i < partVals.size(); i++) {
+ TypeInfo expectedType =
+ TypeInfoUtils.getTypeInfoFromTypeString(partTypes.get(i));
+ ObjectInspector outputOI =
+ TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType);
+ Converter converter = ObjectInspectorConverters.getConverter(
+ PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
+ components[i] = converter.convert(partVals.get(i));
+ }
+ return buildPartitionKey(dbName, tableName, partTypes, components, endPrefix);
+ }
+
+ static byte[] buildPartitionKey(String dbName, String tableName, List<String> partTypes, Object[] components, boolean endPrefix) {
+ ObjectInspector javaStringOI =
+ PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(PrimitiveCategory.STRING);
+ Object[] data = new Object[components.length+2];
+ List<ObjectInspector> fois = new ArrayList<ObjectInspector>(components.length+2);
+ boolean[] endPrefixes = new boolean[components.length+2];
+
+ data[0] = dbName;
+ fois.add(javaStringOI);
+ endPrefixes[0] = false;
+ data[1] = tableName;
+ fois.add(javaStringOI);
+ endPrefixes[1] = false;
+
+ for (int i = 0; i < components.length; i++) {
+ data[i+2] = components[i];
+ TypeInfo expectedType =
+ TypeInfoUtils.getTypeInfoFromTypeString(partTypes.get(i));
+ ObjectInspector outputOI =
+ TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType);
+ fois.add(outputOI);
+ }
+ Output output = new Output();
+ try {
+ BinarySortableSerDeWithEndPrefix.serializeStruct(output, data, fois, endPrefix);
+ } catch (SerDeException e) {
+ throw new RuntimeException("Cannot serialize partition " + StringUtils.join(components, ","));
+ }
+ return Arrays.copyOf(output.getData(), output.getLength());
+ }
+
static class StorageDescriptorParts {
byte[] sdHash;
String location;
@@ -772,11 +848,10 @@
* @param serialized the value fetched from HBase
* @return A struct that contains the partition plus parts of the storage descriptor
*/
- static StorageDescriptorParts deserializePartition(byte[] key, byte[] serialized)
- throws InvalidProtocolBufferException {
- String[] keys = deserializeKey(key);
- return deserializePartition(keys[0], keys[1],
- Arrays.asList(Arrays.copyOfRange(keys, 2, keys.length)), serialized);
+ static StorageDescriptorParts deserializePartition(String dbName, String tableName, List<FieldSchema> partitions,
+ byte[] key, byte[] serialized) throws InvalidProtocolBufferException {
+ List<String> keys = deserializePartitionKey(partitions, key);
+ return deserializePartition(dbName, tableName, keys, serialized);
}
/**
@@ -812,6 +887,36 @@
return k.split(":");
}
+ private static List<String> deserializePartitionKey(List<FieldSchema> partitions, byte[] key) {
+ StringBuffer names = new StringBuffer();
+ names.append("dbName,tableName,");
+ StringBuffer types = new StringBuffer();
+ types.append("string,string,");
+ for (int i = 0; i < partitions.size(); i++) {
+ names.append(partitions.get(i).getName());
+ types.append(partitions.get(i).getType());
+ if (i != partitions.size() - 1) {
+ names.append(",");
+ types.append(",");
+ }
+ }
+ BinarySortableSerDe serDe = new BinarySortableSerDe();
+ Properties props = new Properties();
+ props.setProperty(serdeConstants.LIST_COLUMNS, names.toString());
+ props.setProperty(serdeConstants.LIST_COLUMN_TYPES, types.toString());
+ try {
+ serDe.initialize(new Configuration(), props);
+ List deserializedkeys = ((List)serDe.deserialize(new BytesWritable(key))).subList(2, partitions.size() + 2);
+ List<String> partitionKeys = new ArrayList<String>();
+ for (Object deserializedKey : deserializedkeys) {
+ partitionKeys.add(deserializedKey.toString());
+ }
+ return partitionKeys;
+ } catch (SerDeException e) {
+ throw new RuntimeException("Error when deserialize key", e);
+ }
+ }
+
/**
* Serialize a table
* @param table table object
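The reason for moving from concatenated string keys to BinarySortableSerDe-encoded keys is that HBase scans rows in raw byte order, and text order is not value order for typed partition columns. A two-line illustration in plain Java, with no Hive classes involved:

    public class KeyOrderSketch {
      public static void main(String[] args) {
        System.out.println("9".compareTo("10") > 0);    // true: as text, "9" sorts after "10"
        System.out.println(Integer.compare(9, 10) < 0); // true: as ints, 9 sorts before 10
      }
    }

The new buildPartitionKey therefore converts each value to its declared type and serializes the (dbName, tableName, values...) struct with BinarySortableSerDeWithEndPrefix, so the byte order of the row keys matches the typed order the scan markers assume.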
Index: metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java (revision 0)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java (working copy)
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.BytesWritable;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+public class PartitionKeyComparator extends ByteArrayComparable {
+ private static final Log LOG = LogFactory.getLog(PartitionKeyComparator.class);
+ static class Mark {
+ Mark(String value, boolean inclusive) {
+ this.value = value;
+ this.inclusive = inclusive;
+ }
+ String value;
+ boolean inclusive;
+ public String toString() {
+ return value + (inclusive?"_":"");
+ }
+ }
+ static class Range {
+ Range(String keyName, Mark start, Mark end) {
+ this.keyName = keyName;
+ this.start = start;
+ this.end = end;
+ }
+ String keyName;
+ Mark start;
+ Mark end;
+ public String toString() {
+ return "" + keyName + ":" + (start!=null?start.toString():"") + (end!=null?end.toString():"");
+ }
+ }
+ static class Operator {
+ public Operator(Type type, String keyName, String val) {
+ this.type = type;
+ this.keyName = keyName;
+ this.val = val;
+ }
+ enum Type {
+ LIKE, NOTEQUALS
+ };
+ Type type;
+ String keyName;
+ String val;
+ }
+ String names;
+ String types;
+ List<Range> ranges;
+ List<Operator> ops;
+ public PartitionKeyComparator(String names, String types, List<Range> ranges, List<Operator> ops) {
+ super(null);
+ this.names = names;
+ this.types = types;
+ this.ranges = ranges;
+ this.ops = ops;
+ }
+
+ public static PartitionKeyComparator parseFrom(final byte [] bytes) {
+ HbaseMetastoreProto.PartitionKeyComparator proto;
+ try {
+ proto = HbaseMetastoreProto.PartitionKeyComparator.parseFrom(bytes);
+ } catch (InvalidProtocolBufferException e) {
+ throw new RuntimeException(e);
+ }
+ List<Range> ranges = new ArrayList<Range>();
+ for (HbaseMetastoreProto.PartitionKeyComparator.Range range : proto.getRangeList()) {
+ Mark start = null;
+ if (range.hasStart()) {
+ start = new Mark(range.getStart().getValue(), range.getStart().getInclusive());
+ }
+ Mark end = null;
+ if (range.hasEnd()) {
+ end = new Mark(range.getEnd().getValue(), range.getEnd().getInclusive());
+ }
+ ranges.add(new Range(range.getKey(), start, end));
+ }
+ List<Operator> ops = new ArrayList<Operator>();
+ for (HbaseMetastoreProto.PartitionKeyComparator.Operator op : proto.getOpList()) {
+ ops.add(new Operator(Operator.Type.valueOf(op.getType().name()), op.getKey(),
+ op.getVal()));
+ }
+ return new PartitionKeyComparator(proto.getNames(), proto.getTypes(), ranges, ops);
+ }
+
+ @Override
+ public byte[] toByteArray() {
+ HbaseMetastoreProto.PartitionKeyComparator.Builder builder =
+ HbaseMetastoreProto.PartitionKeyComparator.newBuilder();
+ builder.setNames(names);
+ builder.setTypes(types);
+ for (int i=0;i=0 ||
+ !range.start.inclusive && partVal.compareTo(start)>0) {
+ if (range.end == null || range.end.inclusive && partVal.compareTo(end)<=0 ||
+ !range.end.inclusive && partVal.compareTo(end)<0) {
+ continue;
+ }
+ }
+ LOG.info("Fail to match " + range.keyName + "-" + partVal + "[" + start + "," + end + "]");
+ return 1;
+ }
+
+ for (Operator op : ops) {
+ int pos = ArrayUtils.indexOf(names.split(","), op.keyName);
+ TypeInfo expectedType =
+ TypeInfoUtils.getTypeInfoFromTypeString(types.split(",")[pos]);
+ ObjectInspector outputOI =
+ TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
+ Converter converter = ObjectInspectorConverters.getConverter(
+ PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
+ Comparable val = (Comparable)converter.convert(op.val);
+ switch (op.type) {
+ case LIKE:
+ if (!deserializedkeys.get(pos).toString().matches(op.val)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Fail to match " + op.keyName + "(" + deserializedkeys.get(pos) + ") LIKE " + val);
+ }
+ return 1;
+ }
+ break;
+ case NOTEQUALS:
+ if (val.equals(deserializedkeys.get(pos))) {
+ if (LOG.isDebugEnabled()) {
+ LOG.info("Fail to match " + op.keyName + "(" + deserializedkeys.get(pos) + ")!=" + val);
+ }
+ return 1;
+ }
+ break;
+ }
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.info("Pass " + deserializedkeys);
+ }
+ return 0;
+ }
+
+}
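To show where this comparator plugs in, a sketch of the intended usage mirroring HBaseFilterPlanUtil.getFilter() and HBaseReadWrite above: wrap it in a RowFilter with CompareOp.EQUAL and set it on the partition Scan. The example assumes it is compiled in the same package (Range, Mark and Operator are package-private) and uses made-up key names and values:

    package org.apache.hadoop.hive.metastore.hbase;

    import java.util.Arrays;
    import java.util.Collections;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.CompareFilter;
    import org.apache.hadoop.hbase.filter.RowFilter;

    public class PartitionScanSketch {
      public static Scan buildScan(byte[] startRow, byte[] endRow) {
        PartitionKeyComparator comparator = new PartitionKeyComparator(
            "ds,hr",                 // comma-separated partition key names
            "string,string",         // comma-separated partition key types
            Arrays.asList(new PartitionKeyComparator.Range("ds",
                new PartitionKeyComparator.Mark("2015-01-01", true),
                new PartitionKeyComparator.Mark("2015-01-31", true))),
            Collections.<PartitionKeyComparator.Operator>emptyList());
        Scan scan = new Scan(startRow, endRow);
        // Rows whose deserialized partition values satisfy every Range and Operator compare
        // as EQUAL and are kept; everything else is filtered out on the region server.
        scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, comparator));
        return scan;
      }
    }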
Index: metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
===================================================================
--- metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto (revision 1672517)
+++ metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto (working copy)
@@ -218,7 +218,30 @@
optional bool is_temporary = 14;
}
+message PartitionKeyComparator {
+ required string names = 1;
+ required string types = 2;
+ message Mark {
+ required string value = 1;
+ required bool inclusive = 2;
+ }
+ message Range {
+ required string key = 1;
+ optional Mark start = 2;
+ optional Mark end = 3;
+ }
+ message Operator {
+ enum Type {
+ LIKE = 0;
+ NOTEQUALS = 1;
+ }
+ required Type type = 1;
+ required string key = 2;
+ required string val = 3;
+ }
+ repeated Operator op = 3;
+ repeated Range range = 4;
+}
-
Index: metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java (revision 1672517)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java (working copy)
@@ -18,12 +18,14 @@
*/
package org.apache.hadoop.hive.metastore.hbase;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.List;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.FilterPlan;
import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.MultiScanPlan;
-import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PartitionFilterGenerator;
import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult;
import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;
import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan.ScanMarker;
@@ -68,31 +70,28 @@
ScanMarker r;
// equal plans
- l = new ScanMarker(new byte[] { 1, 2 }, INCLUSIVE);
- r = new ScanMarker(new byte[] { 1, 2 }, INCLUSIVE);
+ l = new ScanMarker("1", INCLUSIVE, "int");
+ r = new ScanMarker("1", INCLUSIVE, "int");
assertFirstGreater(l, r);
- l = new ScanMarker(new byte[] { 1, 2 }, !INCLUSIVE);
- r = new ScanMarker(new byte[] { 1, 2 }, !INCLUSIVE);
+ l = new ScanMarker("1", !INCLUSIVE, "int");
+ r = new ScanMarker("1", !INCLUSIVE, "int");
assertFirstGreater(l, r);
- l = new ScanMarker(null, !INCLUSIVE);
- r = new ScanMarker(null, !INCLUSIVE);
- assertFirstGreater(l, r);
+ assertFirstGreater(null, null);
// create l is greater because of inclusive flag
- l = new ScanMarker(new byte[] { 1, 2 }, !INCLUSIVE);
- r = new ScanMarker(null, !INCLUSIVE);
+ l = new ScanMarker("1", !INCLUSIVE, "int");
// the rule for null vs non-null is different
// non-null is both smaller and greater than null
- Assert.assertEquals(l, ScanPlan.getComparedMarker(l, r, true));
- Assert.assertEquals(l, ScanPlan.getComparedMarker(r, l, true));
- Assert.assertEquals(l, ScanPlan.getComparedMarker(l, r, false));
- Assert.assertEquals(l, ScanPlan.getComparedMarker(r, l, false));
+ Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, true));
+ Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, true));
+ Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, false));
+ Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, false));
// create l that is greater because of the value
- l = new ScanMarker(new byte[] { 1, 2, 0 }, INCLUSIVE);
- r = new ScanMarker(new byte[] { 1, 2 }, INCLUSIVE);
+ l = new ScanMarker("2", INCLUSIVE, "int");
+ r = new ScanMarker("1", INCLUSIVE, "int");
assertFirstGreater(l, r);
}
@@ -111,36 +110,30 @@
public void testScanPlanAnd() {
ScanPlan l = new ScanPlan();
ScanPlan r = new ScanPlan();
- l.setStartMarker(new ScanMarker(new byte[] { 10 }, INCLUSIVE));
- r.setStartMarker(new ScanMarker(new byte[] { 10 }, INCLUSIVE));
+ l.setStartMarker("a", "int", "10", INCLUSIVE);
+ r.setStartMarker("a", "int", "10", INCLUSIVE);
ScanPlan res;
// both equal
res = l.and(r).getPlans().get(0);
- Assert.assertEquals(new ScanMarker(new byte[] { 10 }, INCLUSIVE), res.getStartMarker());
+ Assert.assertEquals(new ScanMarker("10", INCLUSIVE, "int"), res.markers.get("a").startMarker);
// add equal end markers as well, and test AND again
- l.setEndMarker(new ScanMarker(new byte[] { 20 }, INCLUSIVE));
- r.setEndMarker(new ScanMarker(new byte[] { 20 }, INCLUSIVE));
+ l.setEndMarker("a", "int", "20", INCLUSIVE);
+ r.setEndMarker("a", "int", "20", INCLUSIVE);
res = l.and(r).getPlans().get(0);
- Assert.assertEquals(new ScanMarker(new byte[] { 10 }, INCLUSIVE), res.getStartMarker());
- Assert.assertEquals(new ScanMarker(new byte[] { 20 }, INCLUSIVE), res.getEndMarker());
+ Assert.assertEquals(new ScanMarker("10", INCLUSIVE, "int"), res.markers.get("a").startMarker);
+ Assert.assertEquals(new ScanMarker("20", INCLUSIVE, "int"), res.markers.get("a").endMarker);
- l.setEndMarker(new ScanMarker(null, INCLUSIVE));
- r.setStartMarker(new ScanMarker(null, !INCLUSIVE));
- // markers with non null bytes are both lesser and greator
- Assert.assertEquals(l.getStartMarker(), res.getStartMarker());
- Assert.assertEquals(r.getEndMarker(), res.getEndMarker());
+ l.setStartMarker("a", "int", "10", !INCLUSIVE);
+ l.setEndMarker("a", "int", "20", INCLUSIVE);
- l.setStartMarker(new ScanMarker(new byte[] { 10, 11 }, !INCLUSIVE));
- l.setEndMarker(new ScanMarker(new byte[] { 20, 21 }, INCLUSIVE));
-
- r.setStartMarker(new ScanMarker(new byte[] { 10, 10 }, INCLUSIVE));
- r.setEndMarker(new ScanMarker(new byte[] { 15 }, INCLUSIVE));
+ r.setStartMarker("a", "int", "10", INCLUSIVE);
+ r.setEndMarker("a", "int", "15", INCLUSIVE);
res = l.and(r).getPlans().get(0);
// start of l is greater, end of r is smaller
- Assert.assertEquals(l.getStartMarker(), res.getStartMarker());
- Assert.assertEquals(r.getEndMarker(), res.getEndMarker());
+ Assert.assertEquals(l.markers.get("a").startMarker, res.markers.get("a").startMarker);
+ Assert.assertEquals(r.markers.get("a").endMarker, res.markers.get("a").endMarker);
}
@@ -151,13 +144,13 @@
public void testScanPlanOr() {
ScanPlan l = new ScanPlan();
ScanPlan r = new ScanPlan();
- l.setStartMarker(new ScanMarker(new byte[] { 10 }, INCLUSIVE));
- r.setStartMarker(new ScanMarker(new byte[] { 11 }, INCLUSIVE));
+ l.setStartMarker("a", "int", "1", INCLUSIVE);
+ r.setStartMarker("a", "int", "11", INCLUSIVE);
FilterPlan res1 = l.or(r);
Assert.assertEquals(2, res1.getPlans().size());
- res1.getPlans().get(0).getStartMarker().equals(l.getStartMarker());
- res1.getPlans().get(1).getStartMarker().equals(r.getStartMarker());
+ Assert.assertTrue(res1.getPlans().get(0).markers.get("a").startMarker.equals(l.markers.get("a").startMarker));
+ Assert.assertTrue(res1.getPlans().get(1).markers.get("a").startMarker.equals(r.markers.get("a").startMarker));
FilterPlan res2 = res1.or(r);
Assert.assertEquals(3, res2.getPlans().size());
@@ -223,72 +216,85 @@
final String KEY = "k1";
final String VAL = "v1";
- final byte[] VAL_BYTES = PartitionFilterGenerator.toBytes(VAL);
+ final String OTHERKEY = "k2";
LeafNode l = new LeafNode();
l.keyName = KEY;
l.value = VAL;
- final ScanMarker DEFAULT_SCANMARKER = new ScanMarker(null, false);
+ final ScanMarker DEFAULT_SCANMARKER = null;
+ List<FieldSchema> parts = new ArrayList<FieldSchema>();
+ parts.add(new FieldSchema(KEY, "int", null));
+ parts.add(new FieldSchema(OTHERKEY, "int", null));
l.operator = Operator.EQUALS;
- verifyPlan(l, KEY, new ScanMarker(VAL_BYTES, INCLUSIVE), new ScanMarker(VAL_BYTES, INCLUSIVE));
+ verifyPlan(l, parts, KEY, new ScanMarker(VAL, INCLUSIVE, "int"), new ScanMarker(VAL, INCLUSIVE, "int"));
l.operator = Operator.GREATERTHAN;
- verifyPlan(l, KEY, new ScanMarker(VAL_BYTES, !INCLUSIVE), DEFAULT_SCANMARKER);
+ verifyPlan(l, parts, KEY, new ScanMarker(VAL, !INCLUSIVE, "int"), DEFAULT_SCANMARKER);
l.operator = Operator.GREATERTHANOREQUALTO;
- verifyPlan(l, KEY, new ScanMarker(VAL_BYTES, INCLUSIVE), DEFAULT_SCANMARKER);
+ verifyPlan(l, parts, KEY, new ScanMarker(VAL, INCLUSIVE, "int"), DEFAULT_SCANMARKER);
l.operator = Operator.LESSTHAN;
- verifyPlan(l, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL_BYTES, !INCLUSIVE));
+ verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL, !INCLUSIVE, "int"));
l.operator = Operator.LESSTHANOREQUALTO;
- verifyPlan(l, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL_BYTES, INCLUSIVE));
+ verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL, INCLUSIVE, "int"));
// following leaf node plans should currently have true for 'has unsupported condition',
// because of the unsupported operator
l.operator = Operator.NOTEQUALS;
- verifyPlan(l, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
+ verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
l.operator = Operator.NOTEQUALS2;
- verifyPlan(l, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
+ verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
l.operator = Operator.LIKE;
- verifyPlan(l, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
+ verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
// the following leaf node plans are on a key other than the first partition key;
// EQUALS no longer yields an unsupported condition, but NOTEQUALS still does
l.operator = Operator.EQUALS;
- verifyPlan(l, "NOT_FIRST_PART", DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
+ verifyPlan(l, parts, OTHERKEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, false);
l.operator = Operator.NOTEQUALS;
- verifyPlan(l, "NOT_FIRST_PART", DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
+ verifyPlan(l, parts, OTHERKEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
// if tree is null, it should return equivalent of full scan, and true
// for 'has unsupported condition'
- verifyPlan(null, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
+ verifyPlan(null, parts, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true);
}
- private void verifyPlan(TreeNode l, String keyName, ScanMarker startMarker, ScanMarker endMarker)
+ private void verifyPlan(TreeNode l, List<FieldSchema> parts, String keyName, ScanMarker startMarker, ScanMarker endMarker)
throws MetaException {
- verifyPlan(l, keyName, startMarker, endMarker, false);
+ verifyPlan(l, parts, keyName, startMarker, endMarker, false);
}
- private void verifyPlan(TreeNode l, String keyName, ScanMarker startMarker, ScanMarker endMarker,
+ private void verifyPlan(TreeNode l, List<FieldSchema> parts, String keyName, ScanMarker startMarker, ScanMarker endMarker,
boolean hasUnsupportedCondition) throws MetaException {
ExpressionTree e = null;
if (l != null) {
e = new ExpressionTree();
e.setRootForTest(l);
}
- PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, keyName);
+ PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
FilterPlan plan = planRes.plan;
Assert.assertEquals("Has unsupported condition", hasUnsupportedCondition,
planRes.hasUnsupportedCondition);
Assert.assertEquals(1, plan.getPlans().size());
ScanPlan splan = plan.getPlans().get(0);
- Assert.assertEquals(startMarker, splan.getStartMarker());
- Assert.assertEquals(endMarker, splan.getEndMarker());
+ if (startMarker != null) {
+ Assert.assertEquals(startMarker, splan.markers.get(keyName).startMarker);
+ } else {
+ Assert.assertTrue(splan.markers.get(keyName)==null ||
+ splan.markers.get(keyName).startMarker==null);
+ }
+ if (endMarker != null) {
+ Assert.assertEquals(endMarker, splan.markers.get(keyName).endMarker);
+ } else {
+ Assert.assertTrue(splan.markers.get(keyName)==null ||
+ splan.markers.get(keyName).endMarker==null);
+ }
}
/**
@@ -302,13 +308,14 @@
final String KEY = "k1";
final String VAL1 = "10";
final String VAL2 = "11";
- final byte[] VAL1_BYTES = PartitionFilterGenerator.toBytes(VAL1);
- final byte[] VAL2_BYTES = PartitionFilterGenerator.toBytes(VAL2);
LeafNode l = new LeafNode();
l.keyName = KEY;
l.value = VAL1;
- final ScanMarker DEFAULT_SCANMARKER = new ScanMarker(null, false);
+ final ScanMarker DEFAULT_SCANMARKER = null;
+ List<FieldSchema> parts = new ArrayList<FieldSchema>();
+ parts.add(new FieldSchema("k1", "int", null));
+
LeafNode r = new LeafNode();
r.keyName = KEY;
r.value = VAL2;
@@ -318,19 +325,19 @@
// verify plan for - k1 >= '10' and k1 < '11'
l.operator = Operator.GREATERTHANOREQUALTO;
r.operator = Operator.LESSTHAN;
- verifyPlan(tn, KEY, new ScanMarker(VAL1_BYTES, INCLUSIVE), new ScanMarker(VAL2_BYTES,
- !INCLUSIVE));
+ verifyPlan(tn, parts, KEY, new ScanMarker(VAL1, INCLUSIVE, "int"), new ScanMarker(VAL2,
+ !INCLUSIVE, "int"));
// verify plan for - k1 >= '10' and k1 > '11'
l.operator = Operator.GREATERTHANOREQUALTO;
r.operator = Operator.GREATERTHAN;
- verifyPlan(tn, KEY, new ScanMarker(VAL2_BYTES, !INCLUSIVE), DEFAULT_SCANMARKER);
+ verifyPlan(tn, parts, KEY, new ScanMarker(VAL2, !INCLUSIVE, "int"), DEFAULT_SCANMARKER);
// verify plan for - k1 >= '10' or k1 > '11'
tn = new TreeNode(l, LogicalOperator.OR, r);
ExpressionTree e = new ExpressionTree();
e.setRootForTest(tn);
- PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, KEY);
+ PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
Assert.assertEquals(2, planRes.plan.getPlans().size());
Assert.assertEquals(false, planRes.hasUnsupportedCondition);
@@ -338,7 +345,7 @@
TreeNode tn2 = new TreeNode(l, LogicalOperator.AND, tn);
e = new ExpressionTree();
e.setRootForTest(tn2);
- planRes = HBaseFilterPlanUtil.getFilterPlan(e, KEY);
+ planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
Assert.assertEquals(2, planRes.plan.getPlans().size());
Assert.assertEquals(false, planRes.hasUnsupportedCondition);
@@ -351,7 +358,7 @@
TreeNode tn3 = new TreeNode(tn2, LogicalOperator.OR, klike);
e = new ExpressionTree();
e.setRootForTest(tn3);
- planRes = HBaseFilterPlanUtil.getFilterPlan(e, KEY);
+ planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts);
Assert.assertEquals(3, planRes.plan.getPlans().size());
Assert.assertEquals(true, planRes.hasUnsupportedCondition);
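The rewritten tests above exercise ScanPlan's per-key markers map instead of raw byte-array markers. As a short sketch, using only the ScanPlan/ScanMarker calls seen in the tests (setStartMarker(keyName, type, value, inclusive) and the markers map), a plan for k1 >= '10' AND k1 < '11' would be populated as:

    import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;

    public class ScanPlanSketch {
      public static ScanPlan rangePlan() {
        ScanPlan plan = new ScanPlan();
        plan.setStartMarker("k1", "int", "10", true);   // k1 >= '10' (inclusive)
        plan.setEndMarker("k1", "int", "11", false);    // k1 <  '11' (exclusive)
        // plan.markers.get("k1").startMarker equals ScanMarker("10", INCLUSIVE, "int")
        // plan.markers.get("k1").endMarker   equals ScanMarker("11", !INCLUSIVE, "int")
        return plan;
      }
    }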
Index: serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java (revision 0)
+++ serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java (working copy)
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.binarysortable;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+
+public class BinarySortableSerDeWithEndPrefix extends BinarySortableSerDe {
+ public static void serializeStruct(Output byteStream, Object[] fieldData,
+ List<ObjectInspector> fieldOis, boolean endPrefix) throws SerDeException {
+ for (int i = 0; i < fieldData.length; i++) {
+ serialize(byteStream, fieldData[i], fieldOis.get(i), false);
+ }
+ if (endPrefix) {
+ if (fieldData[fieldData.length-1]!=null) {
+ // non-null last field: bump the trailing byte so the serialized prefix
+ // becomes an exclusive upper bound for rows sharing it
+ byteStream.getData()[byteStream.getLength()-1]++;
+ } else {
+ // a null last field serializes as a single 0 marker byte; add 2 to sort
+ // past both the null marker (0) and the non-null marker (1)
+ byteStream.getData()[byteStream.getLength()-1]+=2;
+ }
+ }
+ }
+}
\ No newline at end of file
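A minimal sketch of how this helper could be used to derive start and end row prefixes for an HBase range scan over serialized partition keys. The choice of object inspector and the interpretation of the two byte arrays as an inclusive start / exclusive end pair are assumptions of the sketch, not part of this patch.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.serde2.ByteStream.Output;
    import org.apache.hadoop.hive.serde2.SerDeException;
    import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDeWithEndPrefix;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    public class EndPrefixSketch {
      // Serializes a single string partition value twice: once as-is (start key)
      // and once with endPrefix=true, which bumps the final byte so the result
      // sorts just past every row that shares the prefix (exclusive end key).
      public static byte[][] startAndEnd(String value) throws SerDeException {
        List<ObjectInspector> ois = Arrays.<ObjectInspector>asList(
            PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        Output start = new Output();
        BinarySortableSerDeWithEndPrefix.serializeStruct(start, new Object[] { value }, ois, false);
        Output end = new Output();
        BinarySortableSerDeWithEndPrefix.serializeStruct(end, new Object[] { value }, ois, true);
        return new byte[][] {
            Arrays.copyOf(start.getData(), start.getLength()),
            Arrays.copyOf(end.getData(), end.getLength()) };
      }
    }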