commit fad9af2f1bdefc007574af0235e0ef440c93488c Author: Daniel Dai Date: Fri Jun 24 11:07:36 2016 -0700 HIVE-14097: Fix TestCliDriver for hbase metastore diff --git a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java index 9f2a88c..7741677 100644 --- a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java +++ b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java @@ -18,9 +18,13 @@ public static void registerAllExtensions( */ USER(0, 0), /** - * ROLE = 1; + * GROUP = 1; */ - ROLE(1, 1), + GROUP(1, 1), + /** + * ROLE = 2; + */ + ROLE(2, 2), ; /** @@ -28,9 +32,13 @@ public static void registerAllExtensions( */ public static final int USER_VALUE = 0; /** - * ROLE = 1; + * GROUP = 1; + */ + public static final int GROUP_VALUE = 1; + /** + * ROLE = 2; */ - public static final int ROLE_VALUE = 1; + public static final int ROLE_VALUE = 2; public final int getNumber() { return value; } @@ -38,7 +46,8 @@ public static void registerAllExtensions( public static PrincipalType valueOf(int value) { switch (value) { case 0: return USER; - case 1: return ROLE; + case 1: return GROUP; + case 2: return ROLE; default: return null; } } @@ -3904,32 +3913,46 @@ public Builder clearMaxCacheEntryLife() { */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder(); - // optional string column_name = 11; + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + boolean hasDateStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats getDateStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStatsOrBuilder getDateStatsOrBuilder(); + + // optional string column_name = 12; /** - * optional string column_name = 11; + * optional string column_name = 12; */ boolean hasColumnName(); /** - * optional string column_name = 11; + * optional string column_name = 12; */ java.lang.String getColumnName(); /** - * optional string column_name = 11; + * optional string column_name = 12; */ com.google.protobuf.ByteString getColumnNameBytes(); - // optional string bit_vectors = 12; + // optional string bit_vectors = 13; /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ boolean hasBitVectors(); /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ java.lang.String getBitVectors(); /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ com.google.protobuf.ByteString getBitVectorsBytes(); @@ -4084,12 +4107,25 @@ private ColumnStats( break; } case 90: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000400) == 0x00000400)) { + subBuilder = dateStats_.toBuilder(); + } + dateStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.PARSER, extensionRegistry); + if (subBuilder != null) { + 
subBuilder.mergeFrom(dateStats_); + dateStats_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000400; - columnName_ = input.readBytes(); break; } case 98: { bitField0_ |= 0x00000800; + columnName_ = input.readBytes(); + break; + } + case 106: { + bitField0_ |= 0x00001000; bitVectors_ = input.readBytes(); break; } @@ -5563,48 +5599,460 @@ public Builder clearHighValue() { // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats) } - public interface StringStatsOrBuilder + public interface DateOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional int64 max_col_length = 1; + // required int64 daysSinceEpoch = 1; /** - * optional int64 max_col_length = 1; + * required int64 daysSinceEpoch = 1; */ - boolean hasMaxColLength(); + boolean hasDaysSinceEpoch(); /** - * optional int64 max_col_length = 1; + * required int64 daysSinceEpoch = 1; */ - long getMaxColLength(); + long getDaysSinceEpoch(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date} + */ + public static final class Date extends + com.google.protobuf.GeneratedMessage + implements DateOrBuilder { + // Use Date.newBuilder() to construct. + private Date(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Date(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - // optional double avg_col_length = 2; + private static final Date defaultInstance; + public static Date getDefaultInstance() { + return defaultInstance; + } + + public Date getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Date( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + daysSinceEpoch_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Date parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Date(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int64 daysSinceEpoch = 1; + public static final int DAYSSINCEEPOCH_FIELD_NUMBER = 1; + private long daysSinceEpoch_; /** - * optional double avg_col_length = 2; + * required int64 daysSinceEpoch = 1; */ - boolean hasAvgColLength(); + public boolean hasDaysSinceEpoch() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } /** - * optional double avg_col_length = 2; + * required int64 daysSinceEpoch = 1; */ - double getAvgColLength(); + public long getDaysSinceEpoch() { + return daysSinceEpoch_; + } + + private void initFields() { + daysSinceEpoch_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDaysSinceEpoch()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, daysSinceEpoch_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, daysSinceEpoch_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new 
Builder(); + } + + public Builder clear() { + super.clear(); + daysSinceEpoch_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.daysSinceEpoch_ = daysSinceEpoch_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance()) return this; + if (other.hasDaysSinceEpoch()) { + setDaysSinceEpoch(other.getDaysSinceEpoch()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDaysSinceEpoch()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int64 daysSinceEpoch = 1; + private long daysSinceEpoch_ ; + /** + * required int64 daysSinceEpoch = 1; + */ + public boolean hasDaysSinceEpoch() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 daysSinceEpoch = 1; + */ + public long getDaysSinceEpoch() { + return daysSinceEpoch_; + } + /** + * required int64 daysSinceEpoch = 1; + */ + public Builder setDaysSinceEpoch(long value) { + bitField0_ |= 0x00000001; + daysSinceEpoch_ = value; + 
onChanged(); + return this; + } + /** + * required int64 daysSinceEpoch = 1; + */ + public Builder clearDaysSinceEpoch() { + bitField0_ = (bitField0_ & ~0x00000001); + daysSinceEpoch_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date) + } + + static { + defaultInstance = new Date(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date) + } + + public interface DateStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + boolean hasLowValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date getLowValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder getLowValueOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + boolean hasHighValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date getHighValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder getHighValueOrBuilder(); } /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats} */ - public static final class StringStats extends + public static final class DateStats extends com.google.protobuf.GeneratedMessage - implements StringStatsOrBuilder { - // Use StringStats.newBuilder() to construct. - private StringStats(com.google.protobuf.GeneratedMessage.Builder builder) { + implements DateStatsOrBuilder { + // Use DateStats.newBuilder() to construct. 
+ private DateStats(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private StringStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private DateStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final StringStats defaultInstance; - public static StringStats getDefaultInstance() { + private static final DateStats defaultInstance; + public static DateStats getDefaultInstance() { return defaultInstance; } - public StringStats getDefaultInstanceForType() { + public DateStats getDefaultInstanceForType() { return defaultInstance; } @@ -5614,7 +6062,722 @@ public StringStats getDefaultInstanceForType() { getUnknownFields() { return this.unknownFields; } - private StringStats( + private DateStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = lowValue_.toBuilder(); + } + lowValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(lowValue_); + lowValue_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = highValue_.toBuilder(); + } + highValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(highValue_); + highValue_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DateStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DateStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + public static final int LOW_VALUE_FIELD_NUMBER = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date lowValue_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date getLowValue() { + return lowValue_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder getLowValueOrBuilder() { + return lowValue_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + public static final int HIGH_VALUE_FIELD_NUMBER = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date highValue_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date getHighValue() { + return highValue_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder getHighValueOrBuilder() { + return highValue_; + } + + private void initFields() { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasLowValue()) { + if (!getLowValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasHighValue()) { + if (!getHighValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, highValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = 
memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, highValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats prototype) { + 
return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getLowValueFieldBuilder(); + getHighValueFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (lowValueBuilder_ == null) { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + } else { + lowValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (highValueBuilder_ == null) { + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + } else { + highValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats result = new 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (lowValueBuilder_ == null) { + result.lowValue_ = lowValue_; + } else { + result.lowValue_ = lowValueBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (highValueBuilder_ == null) { + result.highValue_ = highValue_; + } else { + result.highValue_ = highValueBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.getDefaultInstance()) return this; + if (other.hasLowValue()) { + mergeLowValue(other.getLowValue()); + } + if (other.hasHighValue()) { + mergeHighValue(other.getHighValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasLowValue()) { + if (!getLowValue().isInitialized()) { + + return false; + } + } + if (hasHighValue()) { + if (!getHighValue().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder> lowValueBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date getLowValue() { + if (lowValueBuilder_ == null) { + return lowValue_; + } else { + return lowValueBuilder_.getMessage(); + } + } + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public Builder setLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date value) { + if (lowValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lowValue_ = value; + onChanged(); + } else { + lowValueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public Builder setLowValue( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder builderForValue) { + if (lowValueBuilder_ == null) { + lowValue_ = builderForValue.build(); + onChanged(); + } else { + lowValueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public Builder mergeLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date value) { + if (lowValueBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + lowValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance()) { + lowValue_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.newBuilder(lowValue_).mergeFrom(value).buildPartial(); + } else { + lowValue_ = value; + } + onChanged(); + } else { + lowValueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public Builder clearLowValue() { + if (lowValueBuilder_ == null) { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + onChanged(); + } else { + lowValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder getLowValueBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getLowValueFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder getLowValueOrBuilder() { + if (lowValueBuilder_ != null) { + return lowValueBuilder_.getMessageOrBuilder(); + } else { + return lowValue_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date low_value = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder> + getLowValueFieldBuilder() { + if (lowValueBuilder_ == null) { + lowValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder>( + lowValue_, + getParentForChildren(), + isClean()); + lowValue_ = null; + } + return lowValueBuilder_; + } + + // optional 
.org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder> highValueBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date getHighValue() { + if (highValueBuilder_ == null) { + return highValue_; + } else { + return highValueBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public Builder setHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date value) { + if (highValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + highValue_ = value; + onChanged(); + } else { + highValueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public Builder setHighValue( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder builderForValue) { + if (highValueBuilder_ == null) { + highValue_ = builderForValue.build(); + onChanged(); + } else { + highValueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public Builder mergeHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date value) { + if (highValueBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + highValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance()) { + highValue_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.newBuilder(highValue_).mergeFrom(value).buildPartial(); + } else { + highValue_ = value; + } + onChanged(); + } else { + highValueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public Builder clearHighValue() { + if (highValueBuilder_ == null) { + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.getDefaultInstance(); + onChanged(); + } else { + highValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder getHighValueBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getHighValueFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder getHighValueOrBuilder() { + if (highValueBuilder_ != null) { + return highValueBuilder_.getMessageOrBuilder(); + } else { + return highValue_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.Date high_value = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder> + getHighValueFieldBuilder() { + if (highValueBuilder_ == null) { + highValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Date.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateOrBuilder>( + highValue_, + getParentForChildren(), + isClean()); + highValue_ = null; + } + return highValueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats) + } + + static { + defaultInstance = new DateStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats) + } + + public interface StringStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 max_col_length = 1; + /** + * optional int64 max_col_length = 1; + */ + boolean hasMaxColLength(); + /** + * optional int64 max_col_length = 1; + */ + long getMaxColLength(); + + // optional double avg_col_length = 2; + /** + * optional double avg_col_length = 2; + */ + boolean hasAvgColLength(); + /** + * optional double avg_col_length = 2; + */ + double getAvgColLength(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} + */ + public static final class StringStats extends + com.google.protobuf.GeneratedMessage + implements StringStatsOrBuilder { + // Use StringStats.newBuilder() to construct. 
+ private StringStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StringStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StringStats defaultInstance; + public static StringStats getDefaultInstance() { + return defaultInstance; + } + + public StringStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StringStats( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -7483,17 +8646,39 @@ public boolean hasDecimalStats() { return decimalStats_; } - // optional string column_name = 11; - public static final int COLUMN_NAME_FIELD_NUMBER = 11; + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + public static final int DATE_STATS_FIELD_NUMBER = 11; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats dateStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public boolean hasDateStats() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats getDateStats() { + return dateStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStatsOrBuilder getDateStatsOrBuilder() { + return dateStats_; + } + + // optional string column_name = 12; + public static final int COLUMN_NAME_FIELD_NUMBER = 12; private java.lang.Object columnName_; /** - * optional string column_name = 11; + * optional string column_name = 12; */ public boolean hasColumnName() { - return ((bitField0_ & 0x00000400) == 0x00000400); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** - * optional string column_name = 11; + * optional string column_name = 12; */ public java.lang.String getColumnName() { java.lang.Object ref = columnName_; @@ -7510,7 +8695,7 @@ public boolean hasColumnName() { } } /** - * optional string column_name = 11; + * optional string column_name = 12; */ public com.google.protobuf.ByteString getColumnNameBytes() { @@ -7526,17 +8711,17 @@ public boolean hasColumnName() { } } - // optional string bit_vectors = 12; - public static final int BIT_VECTORS_FIELD_NUMBER = 12; + // optional string bit_vectors = 13; + public static final int BIT_VECTORS_FIELD_NUMBER = 13; private java.lang.Object bitVectors_; /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public boolean hasBitVectors() { - return ((bitField0_ & 0x00000800) == 0x00000800); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public java.lang.String getBitVectors() { java.lang.Object ref = bitVectors_; @@ -7553,7 +8738,7 @@ public boolean hasBitVectors() { } } /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public 
com.google.protobuf.ByteString getBitVectorsBytes() { @@ -7580,6 +8765,7 @@ private void initFields() { stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + dateStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.getDefaultInstance(); columnName_ = ""; bitVectors_ = ""; } @@ -7598,6 +8784,12 @@ public final boolean isInitialized() { return false; } } + if (hasDateStats()) { + if (!getDateStats().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -7636,10 +8828,13 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeMessage(10, decimalStats_); } if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeBytes(11, getColumnNameBytes()); + output.writeMessage(11, dateStats_); } if (((bitField0_ & 0x00000800) == 0x00000800)) { - output.writeBytes(12, getBitVectorsBytes()); + output.writeBytes(12, getColumnNameBytes()); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + output.writeBytes(13, getBitVectorsBytes()); } getUnknownFields().writeTo(output); } @@ -7692,11 +8887,15 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000400) == 0x00000400)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(11, getColumnNameBytes()); + .computeMessageSize(11, dateStats_); } if (((bitField0_ & 0x00000800) == 0x00000800)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(12, getBitVectorsBytes()); + .computeBytesSize(12, getColumnNameBytes()); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(13, getBitVectorsBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -7812,6 +9011,7 @@ private void maybeForceBuilderInitialization() { getStringStatsFieldBuilder(); getBinaryStatsFieldBuilder(); getDecimalStatsFieldBuilder(); + getDateStatsFieldBuilder(); } } private static Builder create() { @@ -7864,10 +9064,16 @@ public Builder clear() { decimalStatsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000200); - columnName_ = ""; + if (dateStatsBuilder_ == null) { + dateStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.getDefaultInstance(); + } else { + dateStatsBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000400); - bitVectors_ = ""; + columnName_ = ""; bitField0_ = (bitField0_ & ~0x00000800); + bitVectors_ = ""; + bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -7963,10 +9169,18 @@ public Builder clone() { if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000400; } - result.columnName_ = columnName_; + if (dateStatsBuilder_ == null) { + result.dateStats_ = dateStats_; + } else { + result.dateStats_ = dateStatsBuilder_.build(); + } if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000800; } + result.columnName_ = columnName_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00001000; + } result.bitVectors_ = bitVectors_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -8016,13 +9230,16 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr if 
(other.hasDecimalStats()) { mergeDecimalStats(other.getDecimalStats()); } + if (other.hasDateStats()) { + mergeDateStats(other.getDateStats()); + } if (other.hasColumnName()) { - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; columnName_ = other.columnName_; onChanged(); } if (other.hasBitVectors()) { - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; bitVectors_ = other.bitVectors_; onChanged(); } @@ -8041,6 +9258,12 @@ public final boolean isInitialized() { return false; } } + if (hasDateStats()) { + if (!getDateStats().isInitialized()) { + + return false; + } + } return true; } @@ -8938,16 +10161,133 @@ public Builder clearDecimalStats() { return decimalStatsBuilder_; } - // optional string column_name = 11; + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats dateStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStatsOrBuilder> dateStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public boolean hasDateStats() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats getDateStats() { + if (dateStatsBuilder_ == null) { + return dateStats_; + } else { + return dateStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public Builder setDateStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats value) { + if (dateStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + dateStats_ = value; + onChanged(); + } else { + dateStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public Builder setDateStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder builderForValue) { + if (dateStatsBuilder_ == null) { + dateStats_ = builderForValue.build(); + onChanged(); + } else { + dateStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public Builder mergeDateStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats value) { + if (dateStatsBuilder_ == null) { + if (((bitField0_ & 0x00000400) == 0x00000400) && + dateStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.getDefaultInstance()) { + dateStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.newBuilder(dateStats_).mergeFrom(value).buildPartial(); + } else { + dateStats_ = value; + } + onChanged(); + } else { + dateStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public Builder clearDateStats() { + if (dateStatsBuilder_ == null) { + dateStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.getDefaultInstance(); + onChanged(); + } else { + dateStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder getDateStatsBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return getDateStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStatsOrBuilder getDateStatsOrBuilder() { + if (dateStatsBuilder_ != null) { + return dateStatsBuilder_.getMessageOrBuilder(); + } else { + return dateStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DateStats date_stats = 11; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStatsOrBuilder> + getDateStatsFieldBuilder() { + if (dateStatsBuilder_ == null) { + dateStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DateStatsOrBuilder>( + dateStats_, + getParentForChildren(), + isClean()); + dateStats_ = null; + } + return dateStatsBuilder_; + } + + // optional string column_name = 12; private java.lang.Object columnName_ = ""; /** - * optional string column_name = 11; + * optional string column_name = 12; */ public boolean hasColumnName() { - return ((bitField0_ & 0x00000400) == 0x00000400); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** - * optional string column_name = 11; + * optional string column_name = 12; */ public java.lang.String getColumnName() { java.lang.Object ref = columnName_; @@ -8961,7 +10301,7 @@ public boolean hasColumnName() { } } /** - * optional string column_name = 11; + * optional string column_name = 12; */ public com.google.protobuf.ByteString getColumnNameBytes() { @@ -8977,51 +10317,51 @@ public boolean hasColumnName() { } } /** - * optional string column_name = 11; + * optional string column_name = 12; */ public Builder setColumnName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; columnName_ = value; onChanged(); return this; } /** - * optional string column_name = 11; + * optional string column_name = 12; */ public Builder clearColumnName() { - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000800); columnName_ = getDefaultInstance().getColumnName(); onChanged(); return this; } /** - * optional string column_name = 11; + * optional string column_name = 12; */ public Builder setColumnNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 
0x00000400; + bitField0_ |= 0x00000800; columnName_ = value; onChanged(); return this; } - // optional string bit_vectors = 12; + // optional string bit_vectors = 13; private java.lang.Object bitVectors_ = ""; /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public boolean hasBitVectors() { - return ((bitField0_ & 0x00000800) == 0x00000800); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public java.lang.String getBitVectors() { java.lang.Object ref = bitVectors_; @@ -9035,7 +10375,7 @@ public boolean hasBitVectors() { } } /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public com.google.protobuf.ByteString getBitVectorsBytes() { @@ -9051,36 +10391,36 @@ public boolean hasBitVectors() { } } /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public Builder setBitVectors( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; bitVectors_ = value; onChanged(); return this; } /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public Builder clearBitVectors() { - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00001000); bitVectors_ = getDefaultInstance().getBitVectors(); onChanged(); return this; } /** - * optional string bit_vectors = 12; + * optional string bit_vectors = 13; */ public Builder setBitVectorsBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; bitVectors_ = value; onChanged(); return this; @@ -15531,13 +16871,13 @@ public Builder removeParameter(int index) { */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); - // required bytes sd_hash = 5; + // optional bytes sd_hash = 5; /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ boolean hasSdHash(); /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ com.google.protobuf.ByteString getSdHash(); @@ -15566,6 +16906,20 @@ public Builder removeParameter(int index) { * */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + boolean hasPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder(); } /** * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Partition} @@ -15664,6 +17018,19 @@ private Partition( bitField0_ |= 0x00000020; break; } + case 58: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + subBuilder = privileges_.toBuilder(); + } + privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, 
extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(privileges_); + privileges_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000040; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -15813,17 +17180,17 @@ public boolean hasSdParameters() { return sdParameters_; } - // required bytes sd_hash = 5; + // optional bytes sd_hash = 5; public static final int SD_HASH_FIELD_NUMBER = 5; private com.google.protobuf.ByteString sdHash_; /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ public boolean hasSdHash() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ public com.google.protobuf.ByteString getSdHash() { return sdHash_; @@ -15863,6 +17230,28 @@ public boolean hasParameters() { return parameters_; } + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + public static final int PRIVILEGES_FIELD_NUMBER = 7; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + return privileges_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + return privileges_; + } + private void initFields() { createTime_ = 0L; lastAccessTime_ = 0L; @@ -15870,16 +17259,13 @@ private void initFields() { sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); sdHash_ = com.google.protobuf.ByteString.EMPTY; parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasSdHash()) { - memoizedIsInitialized = 0; - return false; - } if (hasSdParameters()) { if (!getSdParameters().isInitialized()) { memoizedIsInitialized = 0; @@ -15892,6 +17278,12 @@ public final boolean isInitialized() { return false; } } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -15917,6 +17309,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeMessage(6, parameters_); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeMessage(7, privileges_); + } getUnknownFields().writeTo(output); } @@ -15950,6 +17345,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(6, parameters_); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, privileges_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize 
= size; return size; @@ -16060,6 +17459,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getSdParametersFieldBuilder(); getParametersFieldBuilder(); + getPrivilegesFieldBuilder(); } } private static Builder create() { @@ -16088,6 +17488,12 @@ public Builder clear() { parametersBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -16148,6 +17554,14 @@ public Builder clone() { } else { result.parameters_ = parametersBuilder_.build(); } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + if (privilegesBuilder_ == null) { + result.privileges_ = privileges_; + } else { + result.privileges_ = privilegesBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -16184,15 +17598,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr if (other.hasParameters()) { mergeParameters(other.getParameters()); } + if (other.hasPrivileges()) { + mergePrivileges(other.getPrivileges()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasSdHash()) { - - return false; - } if (hasSdParameters()) { if (!getSdParameters().isInitialized()) { @@ -16205,6 +17618,12 @@ public final boolean isInitialized() { return false; } } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + + return false; + } + } return true; } @@ -16520,22 +17939,22 @@ public Builder clearSdParameters() { return sdParametersBuilder_; } - // required bytes sd_hash = 5; + // optional bytes sd_hash = 5; private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ public boolean hasSdHash() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ public com.google.protobuf.ByteString getSdHash() { return sdHash_; } /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ public Builder setSdHash(com.google.protobuf.ByteString value) { if (value == null) { @@ -16547,7 +17966,7 @@ public Builder setSdHash(com.google.protobuf.ByteString value) { return this; } /** - * required bytes sd_hash = 5; + * optional bytes sd_hash = 5; */ public Builder clearSdHash() { bitField0_ = (bitField0_ & ~0x00000010); @@ -16709,6 +18128,123 @@ public Builder clearParameters() { return parametersBuilder_; } + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public boolean hasPrivileges() { + return 
((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + if (privilegesBuilder_ == null) { + return privileges_; + } else { + return privilegesBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + privileges_ = value; + onChanged(); + } else { + privilegesBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public Builder setPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { + if (privilegesBuilder_ == null) { + privileges_ = builderForValue.build(); + onChanged(); + } else { + privilegesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040) && + privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { + privileges_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); + } else { + privileges_ = value; + } + onChanged(); + } else { + privilegesBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getPrivilegesFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilder(); + } else { + return privileges_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> + 
getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( + privileges_, + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Partition) } @@ -17589,27 +19125,52 @@ public Builder removePrivileges(int index) { org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( int index); - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + java.util.List + getGroupsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getGroups(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + int getGroupsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + java.util.List + getGroupsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getGroupsOrBuilder( + int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ java.util.List getRolesList(); /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index); /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ int getRolesCount(); /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ java.util.List getRolesOrBuilderList(); /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( int index); @@ -17675,9 +19236,17 @@ private PrincipalPrivilegeSet( } case 18: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - roles_ = new java.util.ArrayList(); + groups_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000002; } + 
groups_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + roles_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } roles_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); break; } @@ -17693,6 +19262,9 @@ private PrincipalPrivilegeSet( users_ = java.util.Collections.unmodifiableList(users_); } if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + groups_ = java.util.Collections.unmodifiableList(groups_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { roles_ = java.util.Collections.unmodifiableList(roles_); } this.unknownFields = unknownFields.build(); @@ -17762,36 +19334,72 @@ public int getUsersCount() { return users_.get(index); } - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - public static final int ROLES_FIELD_NUMBER = 2; + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + public static final int GROUPS_FIELD_NUMBER = 2; + private java.util.List groups_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public java.util.List getGroupsList() { + return groups_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public java.util.List + getGroupsOrBuilderList() { + return groups_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public int getGroupsCount() { + return groups_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getGroups(int index) { + return groups_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getGroupsOrBuilder( + int index) { + return groups_.get(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; + public static final int ROLES_FIELD_NUMBER = 3; private java.util.List roles_; /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public java.util.List getRolesList() { return roles_; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public java.util.List getRolesOrBuilderList() { return roles_; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public int getRolesCount() { return roles_.size(); } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { return roles_.get(index); } /** - 
* repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( int index) { @@ -17800,6 +19408,7 @@ public int getRolesCount() { private void initFields() { users_ = java.util.Collections.emptyList(); + groups_ = java.util.Collections.emptyList(); roles_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; @@ -17813,6 +19422,12 @@ public final boolean isInitialized() { return false; } } + for (int i = 0; i < getGroupsCount(); i++) { + if (!getGroups(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } for (int i = 0; i < getRolesCount(); i++) { if (!getRoles(i).isInitialized()) { memoizedIsInitialized = 0; @@ -17829,8 +19444,11 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) for (int i = 0; i < users_.size(); i++) { output.writeMessage(1, users_.get(i)); } + for (int i = 0; i < groups_.size(); i++) { + output.writeMessage(2, groups_.get(i)); + } for (int i = 0; i < roles_.size(); i++) { - output.writeMessage(2, roles_.get(i)); + output.writeMessage(3, roles_.get(i)); } getUnknownFields().writeTo(output); } @@ -17845,9 +19463,13 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, users_.get(i)); } + for (int i = 0; i < groups_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, groups_.get(i)); + } for (int i = 0; i < roles_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, roles_.get(i)); + .computeMessageSize(3, roles_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -17958,6 +19580,7 @@ private Builder( private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getUsersFieldBuilder(); + getGroupsFieldBuilder(); getRolesFieldBuilder(); } } @@ -17973,9 +19596,15 @@ public Builder clear() { } else { usersBuilder_.clear(); } + if (groupsBuilder_ == null) { + groups_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + groupsBuilder_.clear(); + } if (rolesBuilder_ == null) { roles_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); } else { rolesBuilder_.clear(); } @@ -18015,11 +19644,20 @@ public Builder clone() { } else { result.users_ = usersBuilder_.build(); } - if (rolesBuilder_ == null) { + if (groupsBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { - roles_ = java.util.Collections.unmodifiableList(roles_); + groups_ = java.util.Collections.unmodifiableList(groups_); bitField0_ = (bitField0_ & ~0x00000002); } + result.groups_ = groups_; + } else { + result.groups_ = groupsBuilder_.build(); + } + if (rolesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + roles_ = java.util.Collections.unmodifiableList(roles_); + bitField0_ = (bitField0_ & ~0x00000004); + } result.roles_ = roles_; } else { result.roles_ = rolesBuilder_.build(); @@ -18065,11 +19703,37 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr } } } + if (groupsBuilder_ == null) { + if (!other.groups_.isEmpty()) { + if (groups_.isEmpty()) { + groups_ = other.groups_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + 
ensureGroupsIsMutable(); + groups_.addAll(other.groups_); + } + onChanged(); + } + } else { + if (!other.groups_.isEmpty()) { + if (groupsBuilder_.isEmpty()) { + groupsBuilder_.dispose(); + groupsBuilder_ = null; + groups_ = other.groups_; + bitField0_ = (bitField0_ & ~0x00000002); + groupsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getGroupsFieldBuilder() : null; + } else { + groupsBuilder_.addAllMessages(other.groups_); + } + } + } if (rolesBuilder_ == null) { if (!other.roles_.isEmpty()) { if (roles_.isEmpty()) { roles_ = other.roles_; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); } else { ensureRolesIsMutable(); roles_.addAll(other.roles_); @@ -18082,7 +19746,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr rolesBuilder_.dispose(); rolesBuilder_ = null; roles_ = other.roles_; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); rolesBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getRolesFieldBuilder() : null; @@ -18102,6 +19766,12 @@ public final boolean isInitialized() { return false; } } + for (int i = 0; i < getGroupsCount(); i++) { + if (!getGroups(i).isInitialized()) { + + return false; + } + } for (int i = 0; i < getRolesCount(); i++) { if (!getRoles(i).isInitialized()) { @@ -18370,13 +20040,253 @@ public Builder removeUsers(int index) { return usersBuilder_; } - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + private java.util.List groups_ = + java.util.Collections.emptyList(); + private void ensureGroupsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + groups_ = new java.util.ArrayList(groups_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> groupsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public java.util.List getGroupsList() { + if (groupsBuilder_ == null) { + return java.util.Collections.unmodifiableList(groups_); + } else { + return groupsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public int getGroupsCount() { + if (groupsBuilder_ == null) { + return groups_.size(); + } else { + return groupsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getGroups(int index) { + if (groupsBuilder_ == null) { + return groups_.get(index); + } else { + return groupsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder setGroups( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (groupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupsIsMutable(); + groups_.set(index, value); + onChanged(); + } else { + 
groupsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder setGroups( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (groupsBuilder_ == null) { + ensureGroupsIsMutable(); + groups_.set(index, builderForValue.build()); + onChanged(); + } else { + groupsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder addGroups(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (groupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupsIsMutable(); + groups_.add(value); + onChanged(); + } else { + groupsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder addGroups( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (groupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupsIsMutable(); + groups_.add(index, value); + onChanged(); + } else { + groupsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder addGroups( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (groupsBuilder_ == null) { + ensureGroupsIsMutable(); + groups_.add(builderForValue.build()); + onChanged(); + } else { + groupsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder addGroups( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (groupsBuilder_ == null) { + ensureGroupsIsMutable(); + groups_.add(index, builderForValue.build()); + onChanged(); + } else { + groupsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder addAllGroups( + java.lang.Iterable values) { + if (groupsBuilder_ == null) { + ensureGroupsIsMutable(); + super.addAll(values, groups_); + onChanged(); + } else { + groupsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder clearGroups() { + if (groupsBuilder_ == null) { + groups_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + groupsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public Builder removeGroups(int index) { + if (groupsBuilder_ == null) { + ensureGroupsIsMutable(); + groups_.remove(index); + onChanged(); + } else { + groupsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getGroupsBuilder( + int index) { + return getGroupsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getGroupsOrBuilder( + int index) { + if (groupsBuilder_ == null) { + return groups_.get(index); } else { + return groupsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public java.util.List + getGroupsOrBuilderList() { + if (groupsBuilder_ != null) { + return groupsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(groups_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addGroupsBuilder() { + return getGroupsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addGroupsBuilder( + int index) { + return getGroupsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry groups = 2; + */ + public java.util.List + getGroupsBuilderList() { + return getGroupsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> + getGroupsFieldBuilder() { + if (groupsBuilder_ == null) { + groupsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( + groups_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + groups_ = null; + } + return groupsBuilder_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; private java.util.List roles_ = java.util.Collections.emptyList(); private void ensureRolesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { roles_ = new java.util.ArrayList(roles_); - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; } } @@ -18384,7 +20294,7 @@ private void ensureRolesIsMutable() { org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> rolesBuilder_; /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public java.util.List getRolesList() { if (rolesBuilder_ == null) { @@ -18394,7 +20304,7 @@ private void ensureRolesIsMutable() { } } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public int getRolesCount() { if (rolesBuilder_ == null) { @@ -18404,7 +20314,7 @@ public int getRolesCount() { } } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { if (rolesBuilder_ == null) { @@ -18414,7 +20324,7 @@ public int getRolesCount() { } } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder setRoles( int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { @@ -18431,7 +20341,7 @@ public Builder setRoles( return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder setRoles( int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { @@ -18445,7 +20355,7 @@ public Builder setRoles( return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder addRoles(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { if (rolesBuilder_ == null) { @@ -18461,7 +20371,7 @@ public Builder addRoles(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePro return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder addRoles( int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { @@ -18478,7 +20388,7 @@ public Builder addRoles( return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder addRoles( org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { @@ -18492,7 +20402,7 @@ public Builder addRoles( return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder addRoles( int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { @@ -18506,7 +20416,7 @@ public Builder addRoles( return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder addAllRoles( java.lang.Iterable values) { @@ -18520,12 +20430,12 @@ public Builder addAllRoles( return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder clearRoles() { if (rolesBuilder_ == null) { roles_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { rolesBuilder_.clear(); @@ -18533,7 +20443,7 @@ public Builder clearRoles() { return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public Builder removeRoles(int index) { if (rolesBuilder_ == null) { @@ -18546,14 +20456,14 @@ public Builder removeRoles(int index) { return this; } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getRolesBuilder( int index) { return getRolesFieldBuilder().getBuilder(index); } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( int index) { @@ -18563,7 +20473,7 @@ public Builder removeRoles(int index) { } } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public java.util.List getRolesOrBuilderList() { @@ -18574,14 +20484,14 @@ public Builder removeRoles(int index) { } } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder() { return getRolesFieldBuilder().addBuilder( org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder( int index) { @@ -18589,7 +20499,7 @@ public Builder removeRoles(int index) { index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); } /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 3; */ public java.util.List getRolesBuilderList() { @@ -18602,7 +20512,7 @@ public Builder removeRoles(int index) { rolesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( roles_, - ((bitField0_ & 0x00000002) == 0x00000002), + ((bitField0_ & 0x00000004) == 0x00000004), getParentForChildren(), isClean()); roles_ = null; @@ -28305,13 +30215,13 @@ public Builder clearStoredAsSubDirectories() { */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); - // required bytes sd_hash = 7; + // optional bytes sd_hash = 7; /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ boolean hasSdHash(); /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ com.google.protobuf.ByteString getSdHash(); @@ -28782,17 +30692,17 @@ public boolean hasSdParameters() { return sdParameters_; } - // required bytes sd_hash = 7; + // optional bytes sd_hash = 7; public static final int SD_HASH_FIELD_NUMBER = 7; private com.google.protobuf.ByteString sdHash_; /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ public boolean hasSdHash() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ public com.google.protobuf.ByteString getSdHash() { return sdHash_; @@ -29044,10 +30954,6 @@ public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasSdHash()) { - memoizedIsInitialized = 0; - return false; - } if (hasSdParameters()) { if (!getSdParameters().isInitialized()) { memoizedIsInitialized = 0; @@ -29547,10 +31453,6 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr } public final boolean isInitialized() { - if (!hasSdHash()) { - - return false; - } if (hasSdParameters()) { if (!getSdParameters().isInitialized()) { @@ -29997,22 +31899,22 @@ public Builder clearSdParameters() { return sdParametersBuilder_; } - // required bytes sd_hash = 7; + // optional bytes sd_hash = 7; private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ public boolean hasSdHash() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ public com.google.protobuf.ByteString getSdHash() { return sdHash_; } /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ public Builder setSdHash(com.google.protobuf.ByteString value) { if (value == null) { @@ -30024,7 +31926,7 @@ public Builder setSdHash(com.google.protobuf.ByteString value) { return this; } /** - * required bytes sd_hash = 7; + * optional bytes sd_hash = 7; */ public Builder clearSdHash() { bitField0_ = (bitField0_ & ~0x00000040); @@ -36442,6 +38344,16 @@ public Builder removeRange(int index) { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_descriptor; + 
private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -36621,7 +38533,7 @@ public Builder removeRange(int index) { "grStatsInvalidatorFilter.Entry\022\021\n\trun_ev" + "ery\030\002 \002(\003\022\034\n\024max_cache_entry_life\030\003 \002(\003\032" + "?\n\005Entry\022\017\n\007db_name\030\001 \002(\014\022\022\n\ntable_name\030" + - "\002 \002(\014\022\021\n\tpart_name\030\003 \002(\014\"\362\010\n\013ColumnStats" + + "\002 \002(\014\022\021\n\tpart_name\030\003 \002(\014\"\216\013\n\013ColumnStats" + "\022\025\n\rlast_analyzed\030\001 \001(\003\022\023\n\013column_type\030\002" + " \002(\t\022\021\n\tnum_nulls\030\003 \001(\003\022\033\n\023num_distinct_" + "values\030\004 \001(\003\022T\n\nbool_stats\030\005 \001(\0132@.org.a", @@ -36637,141 +38549,153 @@ public Builder removeRange(int index) { "ve.metastore.hbase.ColumnStats.StringSta", "ts\022W\n\rdecimal_stats\030\n \001(\0132@.org.apache.h" + "adoop.hive.metastore.hbase.ColumnStats.D" + - "ecimalStats\022\023\n\013column_name\030\013 \001(\t\022\023\n\013bit_" + - "vectors\030\014 \001(\t\0325\n\014BooleanStats\022\021\n\tnum_tru" + - "es\030\001 \001(\003\022\022\n\nnum_falses\030\002 \001(\003\0322\n\tLongStat" + - "s\022\021\n\tlow_value\030\001 \001(\022\022\022\n\nhigh_value\030\002 \001(\022" + - "\0324\n\013DoubleStats\022\021\n\tlow_value\030\001 \001(\001\022\022\n\nhi" + - "gh_value\030\002 \001(\001\032=\n\013StringStats\022\026\n\016max_col" + - "_length\030\001 \001(\003\022\026\n\016avg_col_length\030\002 \001(\001\032\365\001" + - "\n\014DecimalStats\022[\n\tlow_value\030\001 \001(\0132H.org.", - "apache.hadoop.hive.metastore.hbase.Colum" + - "nStats.DecimalStats.Decimal\022\\\n\nhigh_valu" + - "e\030\002 \001(\0132H.org.apache.hadoop.hive.metasto" + - "re.hbase.ColumnStats.DecimalStats.Decima" + - "l\032*\n\007Decimal\022\020\n\010unscaled\030\001 \002(\014\022\r\n\005scale\030" + - "\002 \002(\005\"\246\002\n\010Database\022\023\n\013description\030\001 \001(\t\022" + - "\013\n\003uri\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.org.a" + - "pache.hadoop.hive.metastore.hbase.Parame" + - "ters\022Q\n\nprivileges\030\004 \001(\0132=.org.apache.ha" + - "doop.hive.metastore.hbase.PrincipalPrivi", - "legeSet\022\022\n\nowner_name\030\005 \001(\t\022I\n\nowner_typ" + - "e\030\006 \001(\01625.org.apache.hadoop.hive.metasto" + - "re.hbase.PrincipalType\"$\n\017DelegationToke" + - "n\022\021\n\ttoken_str\030\001 \002(\t\":\n\013FieldSchema\022\014\n\004n" + - "ame\030\001 \002(\t\022\014\n\004type\030\002 \002(\t\022\017\n\007comment\030\003 \001(\t" + - "\"\206\004\n\010Function\022\022\n\nclass_name\030\001 \001(\t\022\022\n\nown" + - "er_name\030\002 \001(\t\022I\n\nowner_type\030\003 \001(\01625.org." 
+ - "apache.hadoop.hive.metastore.hbase.Princ" + - "ipalType\022\023\n\013create_time\030\004 \001(\022\022T\n\rfunctio" + - "n_type\030\005 \001(\0162=.org.apache.hadoop.hive.me", - "tastore.hbase.Function.FunctionType\022S\n\rr" + - "esource_uris\030\006 \003(\0132<.org.apache.hadoop.h" + - "ive.metastore.hbase.Function.ResourceUri" + - "\032\254\001\n\013ResourceUri\022`\n\rresource_type\030\001 \002(\0162" + - "I.org.apache.hadoop.hive.metastore.hbase" + - ".Function.ResourceUri.ResourceType\022\013\n\003ur" + - "i\030\002 \002(\t\".\n\014ResourceType\022\007\n\003JAR\020\001\022\010\n\004FILE" + - "\020\002\022\013\n\007ARCHIVE\020\003\"\030\n\014FunctionType\022\010\n\004JAVA\020" + - "\001\"\037\n\tMasterKey\022\022\n\nmaster_key\030\001 \002(\t\",\n\016Pa" + - "rameterEntry\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t", - "\"W\n\nParameters\022I\n\tparameter\030\001 \003(\01326.org." + - "apache.hadoop.hive.metastore.hbase.Param" + - "eterEntry\"\360\001\n\tPartition\022\023\n\013create_time\030\001" + - " \001(\003\022\030\n\020last_access_time\030\002 \001(\003\022\020\n\010locati" + - "on\030\003 \001(\t\022I\n\rsd_parameters\030\004 \001(\01322.org.ap" + - "ache.hadoop.hive.metastore.hbase.Paramet" + - "ers\022\017\n\007sd_hash\030\005 \002(\014\022F\n\nparameters\030\006 \001(\013" + - "22.org.apache.hadoop.hive.metastore.hbas" + - "e.Parameters\"\204\001\n\032PrincipalPrivilegeSetEn" + - "try\022\026\n\016principal_name\030\001 \002(\t\022N\n\nprivilege", - "s\030\002 \003(\0132:.org.apache.hadoop.hive.metasto" + - "re.hbase.PrivilegeGrantInfo\"\275\001\n\025Principa" + - "lPrivilegeSet\022Q\n\005users\030\001 \003(\0132B.org.apach" + + "ecimalStats\022Q\n\ndate_stats\030\013 \001(\0132=.org.ap" + + "ache.hadoop.hive.metastore.hbase.ColumnS" + + "tats.DateStats\022\023\n\013column_name\030\014 \001(\t\022\023\n\013b" + + "it_vectors\030\r \001(\t\0325\n\014BooleanStats\022\021\n\tnum_" + + "trues\030\001 \001(\003\022\022\n\nnum_falses\030\002 \001(\003\0322\n\tLongS" + + "tats\022\021\n\tlow_value\030\001 \001(\022\022\022\n\nhigh_value\030\002 " + + "\001(\022\0324\n\013DoubleStats\022\021\n\tlow_value\030\001 \001(\001\022\022\n" + + "\nhigh_value\030\002 \001(\001\032\036\n\004Date\022\026\n\016daysSinceEp", + "och\030\001 \002(\003\032\246\001\n\tDateStats\022K\n\tlow_value\030\001 \001" + + "(\01328.org.apache.hadoop.hive.metastore.hb" + + "ase.ColumnStats.Date\022L\n\nhigh_value\030\002 \001(\013" + + "28.org.apache.hadoop.hive.metastore.hbas" + + "e.ColumnStats.Date\032=\n\013StringStats\022\026\n\016max" + + "_col_length\030\001 \001(\003\022\026\n\016avg_col_length\030\002 \001(" + + "\001\032\365\001\n\014DecimalStats\022[\n\tlow_value\030\001 \001(\0132H." 
+ + "org.apache.hadoop.hive.metastore.hbase.C" + + "olumnStats.DecimalStats.Decimal\022\\\n\nhigh_" + + "value\030\002 \001(\0132H.org.apache.hadoop.hive.met", + "astore.hbase.ColumnStats.DecimalStats.De" + + "cimal\032*\n\007Decimal\022\020\n\010unscaled\030\001 \002(\014\022\r\n\005sc" + + "ale\030\002 \002(\005\"\246\002\n\010Database\022\023\n\013description\030\001 " + + "\001(\t\022\013\n\003uri\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.o" + + "rg.apache.hadoop.hive.metastore.hbase.Pa" + + "rameters\022Q\n\nprivileges\030\004 \001(\0132=.org.apach" + "e.hadoop.hive.metastore.hbase.PrincipalP" + - "rivilegeSetEntry\022Q\n\005roles\030\002 \003(\0132B.org.ap" + - "ache.hadoop.hive.metastore.hbase.Princip" + - "alPrivilegeSetEntry\"\260\001\n\022PrivilegeGrantIn" + - "fo\022\021\n\tprivilege\030\001 \001(\t\022\023\n\013create_time\030\002 \001" + - "(\003\022\017\n\007grantor\030\003 \001(\t\022K\n\014grantor_type\030\004 \001(" + - "\01625.org.apache.hadoop.hive.metastore.hba", - "se.PrincipalType\022\024\n\014grant_option\030\005 \001(\010\"\374" + - "\001\n\rRoleGrantInfo\022\026\n\016principal_name\030\001 \002(\t" + - "\022M\n\016principal_type\030\002 \002(\01625.org.apache.ha" + - "doop.hive.metastore.hbase.PrincipalType\022" + - "\020\n\010add_time\030\003 \001(\003\022\017\n\007grantor\030\004 \001(\t\022K\n\014gr" + - "antor_type\030\005 \001(\01625.org.apache.hadoop.hiv" + - "e.metastore.hbase.PrincipalType\022\024\n\014grant" + - "_option\030\006 \001(\010\"^\n\021RoleGrantInfoList\022I\n\ngr" + - "ant_info\030\001 \003(\01325.org.apache.hadoop.hive." + - "metastore.hbase.RoleGrantInfo\"\030\n\010RoleLis", - "t\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013create_time\030\001" + - " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n\021StorageDesc" + - "riptor\022A\n\004cols\030\001 \003(\01323.org.apache.hadoop" + - ".hive.metastore.hbase.FieldSchema\022\024\n\014inp" + - "ut_format\030\002 \001(\t\022\025\n\routput_format\030\003 \001(\t\022\025" + - "\n\ris_compressed\030\004 \001(\010\022\023\n\013num_buckets\030\005 \001" + - "(\021\022W\n\nserde_info\030\006 \001(\0132C.org.apache.hado" + + "rivilegeSet\022\022\n\nowner_name\030\005 \001(\t\022I\n\nowner" + + "_type\030\006 \001(\01625.org.apache.hadoop.hive.met" + + "astore.hbase.PrincipalType\"$\n\017Delegation", + "Token\022\021\n\ttoken_str\030\001 \002(\t\":\n\013FieldSchema\022" + + "\014\n\004name\030\001 \002(\t\022\014\n\004type\030\002 \002(\t\022\017\n\007comment\030\003" + + " \001(\t\"\206\004\n\010Function\022\022\n\nclass_name\030\001 \001(\t\022\022\n" + + "\nowner_name\030\002 \001(\t\022I\n\nowner_type\030\003 \001(\01625." 
+ + "org.apache.hadoop.hive.metastore.hbase.P" + + "rincipalType\022\023\n\013create_time\030\004 \001(\022\022T\n\rfun" + + "ction_type\030\005 \001(\0162=.org.apache.hadoop.hiv" + + "e.metastore.hbase.Function.FunctionType\022" + + "S\n\rresource_uris\030\006 \003(\0132<.org.apache.hado" + + "op.hive.metastore.hbase.Function.Resourc", + "eUri\032\254\001\n\013ResourceUri\022`\n\rresource_type\030\001 " + + "\002(\0162I.org.apache.hadoop.hive.metastore.h" + + "base.Function.ResourceUri.ResourceType\022\013" + + "\n\003uri\030\002 \002(\t\".\n\014ResourceType\022\007\n\003JAR\020\001\022\010\n\004" + + "FILE\020\002\022\013\n\007ARCHIVE\020\003\"\030\n\014FunctionType\022\010\n\004J" + + "AVA\020\001\"\037\n\tMasterKey\022\022\n\nmaster_key\030\001 \002(\t\"," + + "\n\016ParameterEntry\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002" + + " \002(\t\"W\n\nParameters\022I\n\tparameter\030\001 \003(\01326." + + "org.apache.hadoop.hive.metastore.hbase.P" + + "arameterEntry\"\303\002\n\tPartition\022\023\n\013create_ti", + "me\030\001 \001(\003\022\030\n\020last_access_time\030\002 \001(\003\022\020\n\010lo" + + "cation\030\003 \001(\t\022I\n\rsd_parameters\030\004 \001(\01322.or" + + "g.apache.hadoop.hive.metastore.hbase.Par" + + "ameters\022\017\n\007sd_hash\030\005 \001(\014\022F\n\nparameters\030\006" + + " \001(\01322.org.apache.hadoop.hive.metastore." + + "hbase.Parameters\022Q\n\nprivileges\030\007 \001(\0132=.o" + + "rg.apache.hadoop.hive.metastore.hbase.Pr" + + "incipalPrivilegeSet\"\204\001\n\032PrincipalPrivile" + + "geSetEntry\022\026\n\016principal_name\030\001 \002(\t\022N\n\npr" + + "ivileges\030\002 \003(\0132:.org.apache.hadoop.hive.", + "metastore.hbase.PrivilegeGrantInfo\"\221\002\n\025P" + + "rincipalPrivilegeSet\022Q\n\005users\030\001 \003(\0132B.or" + + "g.apache.hadoop.hive.metastore.hbase.Pri" + + "ncipalPrivilegeSetEntry\022R\n\006groups\030\002 \003(\0132" + + "B.org.apache.hadoop.hive.metastore.hbase" + + ".PrincipalPrivilegeSetEntry\022Q\n\005roles\030\003 \003" + + "(\0132B.org.apache.hadoop.hive.metastore.hb" + + "ase.PrincipalPrivilegeSetEntry\"\260\001\n\022Privi" + + "legeGrantInfo\022\021\n\tprivilege\030\001 \001(\t\022\023\n\013crea" + + "te_time\030\002 \001(\003\022\017\n\007grantor\030\003 \001(\t\022K\n\014granto", + "r_type\030\004 \001(\01625.org.apache.hadoop.hive.me" + + "tastore.hbase.PrincipalType\022\024\n\014grant_opt" + + "ion\030\005 \001(\010\"\374\001\n\rRoleGrantInfo\022\026\n\016principal" + + "_name\030\001 \002(\t\022M\n\016principal_type\030\002 \002(\01625.or" + + "g.apache.hadoop.hive.metastore.hbase.Pri" + + "ncipalType\022\020\n\010add_time\030\003 \001(\003\022\017\n\007grantor\030" + + "\004 \001(\t\022K\n\014grantor_type\030\005 \001(\01625.org.apache" + + ".hadoop.hive.metastore.hbase.PrincipalTy" + + "pe\022\024\n\014grant_option\030\006 \001(\010\"^\n\021RoleGrantInf" + + "oList\022I\n\ngrant_info\030\001 \003(\01325.org.apache.h", + "adoop.hive.metastore.hbase.RoleGrantInfo" + + "\"\030\n\010RoleList\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013cr" + + "eate_time\030\001 \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n\021" + + "StorageDescriptor\022A\n\004cols\030\001 \003(\01323.org.ap" + + "ache.hadoop.hive.metastore.hbase.FieldSc" + + "hema\022\024\n\014input_format\030\002 \001(\t\022\025\n\routput_for" + + "mat\030\003 \001(\t\022\025\n\ris_compressed\030\004 \001(\010\022\023\n\013num_" + + "buckets\030\005 \001(\021\022W\n\nserde_info\030\006 \001(\0132C.org." 
+ + "apache.hadoop.hive.metastore.hbase.Stora" + + "geDescriptor.SerDeInfo\022\023\n\013bucket_cols\030\007 ", + "\003(\t\022R\n\tsort_cols\030\010 \003(\0132?.org.apache.hado" + "op.hive.metastore.hbase.StorageDescripto" + - "r.SerDeInfo\022\023\n\013bucket_cols\030\007 \003(\t\022R\n\tsort" + - "_cols\030\010 \003(\0132?.org.apache.hadoop.hive.met", - "astore.hbase.StorageDescriptor.Order\022Y\n\013" + - "skewed_info\030\t \001(\0132D.org.apache.hadoop.hi" + - "ve.metastore.hbase.StorageDescriptor.Ske" + - "wedInfo\022!\n\031stored_as_sub_directories\030\n \001" + - "(\010\032.\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" + - "r\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004name\030\001 \001(\t\022\031\n" + - "\021serialization_lib\030\002 \001(\t\022F\n\nparameters\030\003" + + "r.Order\022Y\n\013skewed_info\030\t \001(\0132D.org.apach" + + "e.hadoop.hive.metastore.hbase.StorageDes" + + "criptor.SkewedInfo\022!\n\031stored_as_sub_dire" + + "ctories\030\n \001(\010\032.\n\005Order\022\023\n\013column_name\030\001 " + + "\002(\t\022\020\n\005order\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004na" + + "me\030\001 \001(\t\022\031\n\021serialization_lib\030\002 \001(\t\022F\n\np" + + "arameters\030\003 \001(\01322.org.apache.hadoop.hive" + + ".metastore.hbase.Parameters\032\214\003\n\nSkewedIn", + "fo\022\030\n\020skewed_col_names\030\001 \003(\t\022r\n\021skewed_c" + + "ol_values\030\002 \003(\0132W.org.apache.hadoop.hive" + + ".metastore.hbase.StorageDescriptor.Skewe" + + "dInfo.SkewedColValueList\022\206\001\n\036skewed_col_" + + "value_location_maps\030\003 \003(\0132^.org.apache.h" + + "adoop.hive.metastore.hbase.StorageDescri" + + "ptor.SkewedInfo.SkewedColValueLocationMa" + + "p\032.\n\022SkewedColValueList\022\030\n\020skewed_col_va" + + "lue\030\001 \003(\t\0327\n\031SkewedColValueLocationMap\022\013" + + "\n\003key\030\001 \003(\t\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005", + "owner\030\001 \001(\t\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last" + + "_access_time\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n" + + "\010location\030\005 \001(\t\022I\n\rsd_parameters\030\006 \001(\01322" + + ".org.apache.hadoop.hive.metastore.hbase." + + "Parameters\022\017\n\007sd_hash\030\007 \001(\014\022K\n\016partition" + + "_keys\030\010 \003(\01323.org.apache.hadoop.hive.met" + + "astore.hbase.FieldSchema\022F\n\nparameters\030\t" + " \001(\01322.org.apache.hadoop.hive.metastore." + - "hbase.Parameters\032\214\003\n\nSkewedInfo\022\030\n\020skewe" + - "d_col_names\030\001 \003(\t\022r\n\021skewed_col_values\030\002", - " \003(\0132W.org.apache.hadoop.hive.metastore." + - "hbase.StorageDescriptor.SkewedInfo.Skewe" + - "dColValueList\022\206\001\n\036skewed_col_value_locat" + - "ion_maps\030\003 \003(\0132^.org.apache.hadoop.hive." 
+ - "metastore.hbase.StorageDescriptor.Skewed" + - "Info.SkewedColValueLocationMap\032.\n\022Skewed" + - "ColValueList\022\030\n\020skewed_col_value\030\001 \003(\t\0327" + - "\n\031SkewedColValueLocationMap\022\013\n\003key\030\001 \003(\t" + - "\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005owner\030\001 \001(\t" + - "\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last_access_tim", - "e\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n\010location\030\005" + - " \001(\t\022I\n\rsd_parameters\030\006 \001(\01322.org.apache" + - ".hadoop.hive.metastore.hbase.Parameters\022" + - "\017\n\007sd_hash\030\007 \002(\014\022K\n\016partition_keys\030\010 \003(\013" + - "23.org.apache.hadoop.hive.metastore.hbas" + - "e.FieldSchema\022F\n\nparameters\030\t \001(\01322.org." + - "apache.hadoop.hive.metastore.hbase.Param" + - "eters\022\032\n\022view_original_text\030\n \001(\t\022\032\n\022vie" + - "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" + - "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo", - "p.hive.metastore.hbase.PrincipalPrivileg" + - "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\334\002\n\005Index\022\031\n\021" + - "indexHandlerClass\030\001 \001(\t\022\016\n\006dbName\030\002 \002(\t\022" + - "\025\n\rorigTableName\030\003 \002(\t\022\020\n\010location\030\004 \001(\t" + - "\022I\n\rsd_parameters\030\005 \001(\01322.org.apache.had" + - "oop.hive.metastore.hbase.Parameters\022\022\n\nc" + - "reateTime\030\006 \001(\005\022\026\n\016lastAccessTime\030\007 \001(\005\022" + - "\026\n\016indexTableName\030\010 \001(\t\022\017\n\007sd_hash\030\t \001(\014" + - "\022F\n\nparameters\030\n \001(\01322.org.apache.hadoop" + - ".hive.metastore.hbase.Parameters\022\027\n\017defe", - "rredRebuild\030\013 \001(\010\"\353\004\n\026PartitionKeyCompar" + - "ator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op" + - "\030\003 \003(\0132G.org.apache.hadoop.hive.metastor" + - "e.hbase.PartitionKeyComparator.Operator\022" + - "S\n\005range\030\004 \003(\0132D.org.apache.hadoop.hive." + - "metastore.hbase.PartitionKeyComparator.R" + - "ange\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive" + - "\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002" + - " \001(\0132C.org.apache.hadoop.hive.metastore." 
+ - "hbase.PartitionKeyComparator.Mark\022P\n\003end", - "\030\003 \001(\0132C.org.apache.hadoop.hive.metastor" + - "e.hbase.PartitionKeyComparator.Mark\032\241\001\n\010" + - "Operator\022Z\n\004type\030\001 \002(\0162L.org.apache.hado" + - "op.hive.metastore.hbase.PartitionKeyComp" + - "arator.Operator.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val" + - "\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001*" + - "#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001" + "hbase.Parameters\022\032\n\022view_original_text\030\n" + + " \001(\t\022\032\n\022view_expanded_text\030\013 \001(\t\022\022\n\ntabl", + "e_type\030\014 \001(\t\022Q\n\nprivileges\030\r \001(\0132=.org.a" + + "pache.hadoop.hive.metastore.hbase.Princi" + + "palPrivilegeSet\022\024\n\014is_temporary\030\016 \001(\010\"\334\002" + + "\n\005Index\022\031\n\021indexHandlerClass\030\001 \001(\t\022\016\n\006db" + + "Name\030\002 \002(\t\022\025\n\rorigTableName\030\003 \002(\t\022\020\n\010loc" + + "ation\030\004 \001(\t\022I\n\rsd_parameters\030\005 \001(\01322.org" + + ".apache.hadoop.hive.metastore.hbase.Para" + + "meters\022\022\n\ncreateTime\030\006 \001(\005\022\026\n\016lastAccess" + + "Time\030\007 \001(\005\022\026\n\016indexTableName\030\010 \001(\t\022\017\n\007sd" + + "_hash\030\t \001(\014\022F\n\nparameters\030\n \001(\01322.org.ap", + "ache.hadoop.hive.metastore.hbase.Paramet" + + "ers\022\027\n\017deferredRebuild\030\013 \001(\010\"\353\004\n\026Partiti" + + "onKeyComparator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030" + + "\002 \002(\t\022S\n\002op\030\003 \003(\0132G.org.apache.hadoop.hi" + + "ve.metastore.hbase.PartitionKeyComparato" + + "r.Operator\022S\n\005range\030\004 \003(\0132D.org.apache.h" + + "adoop.hive.metastore.hbase.PartitionKeyC" + + "omparator.Range\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021" + + "\n\tinclusive\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t" + + "\022R\n\005start\030\002 \001(\0132C.org.apache.hadoop.hive", + ".metastore.hbase.PartitionKeyComparator." + + "Mark\022P\n\003end\030\003 \001(\0132C.org.apache.hadoop.hi" + + "ve.metastore.hbase.PartitionKeyComparato" + + "r.Mark\032\241\001\n\010Operator\022Z\n\004type\030\001 \002(\0162L.org." 
+ + "apache.hadoop.hive.metastore.hbase.Parti" + + "tionKeyComparator.Operator.Type\022\013\n\003key\030\002" + + " \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tN" + + "OTEQUALS\020\001*.\n\rPrincipalType\022\010\n\004USER\020\000\022\t\n" + + "\005GROUP\020\001\022\010\n\004ROLE\020\002" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -36813,7 +38737,7 @@ public Builder removeRange(int index) { internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor, - new java.lang.String[] { "LastAnalyzed", "ColumnType", "NumNulls", "NumDistinctValues", "BoolStats", "LongStats", "DoubleStats", "StringStats", "BinaryStats", "DecimalStats", "ColumnName", "BitVectors", }); + new java.lang.String[] { "LastAnalyzed", "ColumnType", "NumNulls", "NumDistinctValues", "BoolStats", "LongStats", "DoubleStats", "StringStats", "BinaryStats", "DecimalStats", "DateStats", "ColumnName", "BitVectors", }); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor = internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(0); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable = new @@ -36832,14 +38756,26 @@ public Builder removeRange(int index) { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor, new java.lang.String[] { "LowValue", "HighValue", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_descriptor = internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(3); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_Date_descriptor, + new java.lang.String[] { "DaysSinceEpoch", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(4); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DateStats_descriptor, + new java.lang.String[] { "LowValue", "HighValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(5); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor, new java.lang.String[] { "MaxColLength", "AvgColLength", }); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor = - 
internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(4); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(6); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor, @@ -36903,7 +38839,7 @@ public Builder removeRange(int index) { internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor, - new java.lang.String[] { "CreateTime", "LastAccessTime", "Location", "SdParameters", "SdHash", "Parameters", }); + new java.lang.String[] { "CreateTime", "LastAccessTime", "Location", "SdParameters", "SdHash", "Parameters", "Privileges", }); internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor = getDescriptor().getMessageTypes().get(12); internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable = new @@ -36915,7 +38851,7 @@ public Builder removeRange(int index) { internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor, - new java.lang.String[] { "Users", "Roles", }); + new java.lang.String[] { "Users", "Groups", "Roles", }); internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable = new diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 68c6e44..8be31dd 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -139,6 +139,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, if(MetaStoreUtils.isCascadeNeededInAlterTable(oldt, newt)) { List parts = msdb.getPartitions(dbname, name, -1); for (Partition part : parts) { + part = part.deepCopy(); List oldCols = part.getSd().getCols(); part.getSd().setCols(newt.getSd().getCols()); String oldPartName = Warehouse.makePartName(oldt.getPartitionKeys(), part.getValues()); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java index 9762309..a777b14 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java @@ -161,6 +161,12 @@ public FilterPlan or(FilterPlan other) { */ public static class ScanPlan extends FilterPlan { + private String defaultPartitionName; + + public ScanPlan(String defaultPartitionName) { + this.defaultPartitionName = defaultPartitionName; + } + public static class ScanMarker { final String value; /** @@ -219,6 +225,15 @@ public ScanMarkerPair(ScanMarker startMarker, ScanMarker endMarker) { // represent Scan start, partition key name -> scanMarkerPair Map markers = new 
HashMap(); List ops = new ArrayList(); + boolean unknown = false; + + public boolean hasUnknown() { + return unknown; + } + + public void setUnknown(boolean unknown) { + this.unknown = unknown; + } // Get the number of partition key prefixes which can be used in the scan range. // For example, if partition key is (year, month, state) @@ -230,6 +245,9 @@ public ScanMarkerPair(ScanMarker startMarker, ScanMarker endMarker) { // nothing can be used in scan range, majorParts = 0 private int getMajorPartsCount(List parts) { int majorPartsCount = 0; + if (hasUnknown()) { + return majorPartsCount; + } while (majorPartsCount parts) { return majorPartsCount; } public Filter getFilter(List parts) { + if (hasUnknown()) { + return null; + } int majorPartsCount = getMajorPartsCount(parts); Set majorKeys = new HashSet(); for (int i=0;i parts) { } PartitionKeyComparator.Mark endMark = null; if (entry.getValue().endMarker != null) { - startMark = new PartitionKeyComparator.Mark(entry.getValue().endMarker.value, + endMark = new PartitionKeyComparator.Mark(entry.getValue().endMarker.value, entry.getValue().endMarker.isInclusive); } PartitionKeyComparator.Range range = new PartitionKeyComparator.Range( @@ -280,6 +301,9 @@ public Filter getFilter(List parts) { } public void setStartMarker(String keyName, String keyType, String start, boolean isInclusive) { + if (hasUnknown()) { + return; + } if (markers.containsKey(keyName)) { markers.get(keyName).startMarker = new ScanMarker(start, isInclusive, keyType); } else { @@ -289,6 +313,9 @@ public void setStartMarker(String keyName, String keyType, String start, boolean } public ScanMarker getStartMarker(String keyName) { + if (hasUnknown()) { + return null; + } if (markers.containsKey(keyName)) { return markers.get(keyName).startMarker; } else { @@ -297,6 +324,9 @@ public ScanMarker getStartMarker(String keyName) { } public void setEndMarker(String keyName, String keyType, String end, boolean isInclusive) { + if (hasUnknown()) { + return; + } if (markers.containsKey(keyName)) { markers.get(keyName).endMarker = new ScanMarker(end, isInclusive, keyType); } else { @@ -306,6 +336,9 @@ public void setEndMarker(String keyName, String keyType, String end, boolean isI } public ScanMarker getEndMarker(String keyName) { + if (hasUnknown()) { + return null; + } if (markers.containsKey(keyName)) { return markers.get(keyName).endMarker; } else { @@ -324,21 +357,28 @@ public FilterPlan and(FilterPlan other) { private ScanPlan and(ScanPlan other) { // create combined FilterPlan based on existing lhs and rhs plan - ScanPlan newPlan = new ScanPlan(); - newPlan.markers.putAll(markers); + ScanPlan newPlan = new ScanPlan(defaultPartitionName); + if (hasUnknown() || other.hasUnknown()) { + newPlan.setUnknown(true); + return newPlan; + } + for (Map.Entry entry : markers.entrySet()) { + newPlan.markers.put(entry.getKey(), new ScanMarkerPair( + entry.getValue().startMarker, entry.getValue().endMarker)); + } for (String keyName : other.markers.keySet()) { if (newPlan.markers.containsKey(keyName)) { // create new scan start ScanMarker greaterStartMarker = getComparedMarker(this.getStartMarker(keyName), - other.getStartMarker(keyName), true); + other.getStartMarker(keyName), true, defaultPartitionName); if (greaterStartMarker != null) { newPlan.setStartMarker(keyName, greaterStartMarker.type, greaterStartMarker.value, greaterStartMarker.isInclusive); } // create new scan end ScanMarker lesserEndMarker = getComparedMarker(this.getEndMarker(keyName), other.getEndMarker(keyName), - false); + 
false, defaultPartitionName); if (lesserEndMarker != null) { newPlan.setEndMarker(keyName, lesserEndMarker.type, lesserEndMarker.value, lesserEndMarker.isInclusive); } @@ -360,13 +400,27 @@ private ScanPlan and(ScanPlan other) { */ @VisibleForTesting static ScanMarker getComparedMarker(ScanMarker lStartMarker, ScanMarker rStartMarker, - boolean getGreater) { + boolean getGreater, String defaultPartitionName) { // if one of them has null bytes, just return other if(lStartMarker == null) { return rStartMarker; } else if (rStartMarker == null) { return lStartMarker; } + if (lStartMarker != null && lStartMarker.value != null && lStartMarker.value.equals(defaultPartitionName)) { + if (getGreater) { + return rStartMarker; + } else { + return lStartMarker; + } + } + if (rStartMarker != null && rStartMarker.value != null && rStartMarker.value.equals(defaultPartitionName)) { + if (getGreater) { + return lStartMarker; + } else { + return rStartMarker; + } + } TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(lStartMarker.type); ObjectInspector outputOI = @@ -394,10 +448,10 @@ static ScanMarker getComparedMarker(ScanMarker lStartMarker, ScanMarker rStartMa return new ScanMarker(lStartMarker.value, isInclusive, lStartMarker.type); } if (getGreater) { - return compareRes == 1 ? lStartMarker : rStartMarker; + return compareRes > 0 ? lStartMarker : rStartMarker; } // else - return compareRes == -1 ? lStartMarker : rStartMarker; + return compareRes < 0 ? lStartMarker : rStartMarker; } @@ -417,7 +471,8 @@ public FilterPlan or(FilterPlan other) { /** * @return row suffix - This is appended to db + table, to generate start row for the Scan */ - public byte[] getStartRowSuffix(String dbName, String tableName, List parts) { + public byte[] getStartRowSuffix(String dbName, String tableName, List parts, + String defaultPartitionName) { int majorPartsCount = getMajorPartsCount(parts); List majorPartTypes = new ArrayList(); List components = new ArrayList(); @@ -437,14 +492,16 @@ public FilterPlan or(FilterPlan other) { } } } - byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, endPrefix); + byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, + endPrefix, defaultPartitionName); return bytes; } /** * @return row suffix - This is appended to db + table, to generate end row for the Scan */ - public byte[] getEndRowSuffix(String dbName, String tableName, List parts) { + public byte[] getEndRowSuffix(String dbName, String tableName, List parts, + String defaultPartitionName) { int majorPartsCount = getMajorPartsCount(parts); List majorPartTypes = new ArrayList(); List components = new ArrayList(); @@ -464,7 +521,8 @@ public FilterPlan or(FilterPlan other) { } } } - byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, endPrefix); + byte[] bytes = HBaseUtils.buildPartitionKey(dbName, tableName, majorPartTypes, components, + endPrefix, defaultPartitionName); if (components.isEmpty()) { bytes[bytes.length-1]++; } @@ -507,10 +565,13 @@ public String toString() { private Map nameToType = new HashMap(); - public PartitionFilterGenerator(List parts) { + private String defaultPartitionName; + + public PartitionFilterGenerator(List parts, String defaultPartitionName) { for (FieldSchema part : parts) { nameToType.put(part.getName(), part.getType()); } + this.defaultPartitionName = defaultPartitionName; } FilterPlan getPlan() { @@ -551,8 +612,13 @@ protected void endTreeNode(TreeNode node) throws 
MetaException { @Override public void visit(LeafNode node) throws MetaException { - ScanPlan leafPlan = new ScanPlan(); + ScanPlan leafPlan = new ScanPlan(defaultPartitionName); curPlan = leafPlan; + if (!nameToType.containsKey(node.keyName)) { + leafPlan.setUnknown(true); + hasUnsupportedCondition = true; + return; + } // this is a condition on first partition column, so might influence the // start and end of the scan @@ -599,12 +665,13 @@ private boolean hasUnsupportedCondition() { } } - public static PlanResult getFilterPlan(ExpressionTree exprTree, List parts) throws MetaException { + public static PlanResult getFilterPlan(ExpressionTree exprTree, List parts, + String defaultPartitionName) throws MetaException { if (exprTree == null) { // TODO: if exprTree is null, we should do what ObjectStore does. See HIVE-10102 - return new PlanResult(new ScanPlan(), true); + return new PlanResult(new ScanPlan(defaultPartitionName), true); } - PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(parts); + PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(parts, defaultPartitionName); exprTree.accept(pGenerator); return new PlanResult(pGenerator.getPlan(), pGenerator.hasUnsupportedCondition()); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java index d503cff..8deed34 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Iterators; + import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; @@ -39,10 +40,12 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.filter.RegexStringComparator; import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -51,6 +54,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; @@ -93,6 +97,7 @@ final static String DB_TABLE = "HBMS_DBS"; final static String FUNC_TABLE = "HBMS_FUNCS"; final static String GLOBAL_PRIVS_TABLE = "HBMS_GLOBAL_PRIVS"; + final static String COLUMN_PRIVS_TABLE = "HBMS_COLUMN_PRIVS"; final static String PART_TABLE = "HBMS_PARTITIONS"; final static String ROLE_TABLE = "HBMS_ROLES"; final static String SD_TABLE = "HBMS_SDS"; @@ -105,11 +110,14 @@ final static byte[] CATALOG_CF = "c".getBytes(HBaseUtils.ENCODING); final static byte[] STATS_CF = "s".getBytes(HBaseUtils.ENCODING); final static String NO_CACHE_CONF = "no.use.cache"; + static 
enum ColumnPrivilegeType { + TABLE, PARTITION + } /** * List of tables in HBase */ public final static String[] tableNames = { AGGR_STATS_TABLE, DB_TABLE, FUNC_TABLE, - GLOBAL_PRIVS_TABLE, PART_TABLE, USER_TO_ROLE_TABLE, + GLOBAL_PRIVS_TABLE, COLUMN_PRIVS_TABLE, PART_TABLE, USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, SECURITY_TABLE, SEQUENCES_TABLE, TABLE_TABLE, INDEX_TABLE, FILE_METADATA_TABLE }; public final static Map> columnFamilies = new HashMap<> (tableNames.length); @@ -119,6 +127,7 @@ columnFamilies.put(DB_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(FUNC_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(GLOBAL_PRIVS_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(COLUMN_PRIVS_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(PART_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); columnFamilies.put(USER_TO_ROLE_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(ROLE_TABLE, Arrays.asList(CATALOG_CF)); @@ -608,7 +617,8 @@ Partition getPartition(String dbName, String tableName, List partVals) List parts = new ArrayList<>(partValLists.size()); List gets = new ArrayList<>(partValLists.size()); for (List partVals : partValLists) { - byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Get get = new Get(key); get.addColumn(CATALOG_CF, CATALOG_COL); gets.add(get); @@ -637,7 +647,8 @@ Partition getPartition(String dbName, String tableName, List partVals) void putPartition(Partition partition) throws IOException { byte[] hash = putStorageDescriptor(partition.getSd()); byte[][] serialized = HBaseUtils.serializePartition(partition, - HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()), hash); + HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()), + hash, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); partCache.put(partition.getDbName(), partition.getTableName(), partition); } @@ -659,11 +670,14 @@ void replacePartition(Partition oldPart, Partition newPart, List partTyp hash = putStorageDescriptor(newPart.getSd()); } byte[][] serialized = HBaseUtils.serializePartition(newPart, - HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); + HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), + hash, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); partCache.put(newPart.getDbName(), newPart.getTableName(), newPart); - if (!oldPart.getTableName().equals(newPart.getTableName())) { - deletePartition(oldPart.getDbName(), oldPart.getTableName(), partTypes, oldPart.getValues()); + if (!(oldPart.getDbName().equals(newPart.getDbName()) && + oldPart.getTableName().equals(newPart.getTableName()) && + oldPart.getValues().equals(newPart.getValues()))) { + deletePartition(oldPart.getDbName(), oldPart.getTableName(), partTypes, oldPart.getValues(), false); } } @@ -679,7 +693,8 @@ void putPartitions(List partitions) throws IOException { byte[] hash = putStorageDescriptor(partition.getSd()); List partTypes = HBaseUtils.getPartitionKeyTypes( getTable(partition.getDbName(), 
partition.getTableName()).getPartitionKeys()); - byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash); + byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Put p = new Put(serialized[0]); p.add(CATALOG_CF, CATALOG_COL, serialized[1]); puts.add(p); @@ -690,7 +705,8 @@ void putPartitions(List partitions) throws IOException { conn.flush(htab); } - void replacePartitions(List oldParts, List newParts, List oldPartTypes) throws IOException { + void replacePartitions(List oldParts, List newParts, + List oldPartTypes, List newPartTypes) throws IOException { if (oldParts.size() != newParts.size()) { throw new RuntimeException("Number of old and new partitions must match."); } @@ -705,14 +721,18 @@ void replacePartitions(List oldParts, List newParts, List< decrementStorageDescriptorRefCount(oldParts.get(i).getSd()); hash = putStorageDescriptor(newParts.get(i).getSd()); } + Partition oldPart = oldParts.get(i); Partition newPart = newParts.get(i); - byte[][] serialized = HBaseUtils.serializePartition(newPart, - HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); + byte[][] serialized = HBaseUtils.serializePartition(newPart, newPartTypes, hash, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Put p = new Put(serialized[0]); p.add(CATALOG_CF, CATALOG_COL, serialized[1]); puts.add(p); partCache.put(newParts.get(i).getDbName(), newParts.get(i).getTableName(), newParts.get(i)); - if (!newParts.get(i).getTableName().equals(oldParts.get(i).getTableName())) { + if (!(newPart.getDbName().equals(oldPart.getDbName()) + && newPart.getTableName().equals(oldPart.getTableName()) + && newPart.getValues().equals(oldPart.getValues()) + && newPartTypes.equals(oldPartTypes))) { // We need to remove the old record as well. 
deletePartition(oldParts.get(i).getDbName(), oldParts.get(i).getTableName(), oldPartTypes, oldParts.get(i).getValues(), false); @@ -741,7 +761,7 @@ void replacePartitions(List oldParts, List newParts, List< : new ArrayList<>(cached); } byte[] keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, new ArrayList(), - new ArrayList(), false); + new ArrayList(), false, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); List parts = scanPartitionsWithFilter(dbName, tableName, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), -1, null); partCache.put(dbName, tableName, parts, true); @@ -826,7 +846,8 @@ String printPartition(String partKey) throws IOException, TException { byte[] key = HBaseUtils.buildPartitionKey(partKeyParts[0], partKeyParts[1], HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()), - Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length))); + Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length)), + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); @SuppressWarnings("deprecation") HTableInterface htab = conn.getHBaseTable(PART_TABLE); Get g = new Get(key); @@ -915,7 +936,8 @@ private void deletePartition(String dbName, String tableName, List partT Partition p = getPartition(dbName, tableName, partVals, false); decrementStorageDescriptorRefCount(p.getSd()); } - byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); delete(PART_TABLE, key, null, null); } @@ -924,7 +946,8 @@ private Partition getPartition(String dbName, String tableName, List par Partition cached = partCache.get(dbName, tableName, partVals); if (cached != null) return cached; byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, - HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals); + HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; HBaseUtils.StorageDescriptorParts sdParts = @@ -981,17 +1004,11 @@ private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName int firstStar = -1; for (int i = 0; i < partVals.size(); i++) { - if ("*".equals(partVals.get(i))) { + if ("*".equals(partVals.get(i)) || "".equals(partVals.get(i))) { firstStar = i; break; } else { - // empty string equals to null partition, - // means star - if (partVals.get(i).equals("")) { - break; - } else { - keyElements.add(partVals.get(i)); - } + keyElements.add(partVals.get(i)); } } @@ -1004,7 +1021,8 @@ private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName } keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys().subList(0, keyElements.size()-2)), - keyElements.subList(2, keyElements.size())); + keyElements.subList(2, keyElements.size()), + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); // Now, build a filter out of the remaining keys List ranges = new ArrayList(); @@ -1014,7 +1032,7 @@ private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName for (int i = Math.max(0, firstStar); i < table.getPartitionKeys().size() && i < partVals.size(); i++) { - if ("*".equals(partVals.get(i))) { + if 
("*".equals(partVals.get(i))||"".equals(partVals.get(i))) { PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator( PartitionKeyComparator.Operator.Type.LIKE, table.getPartitionKeys().get(i).getName(), @@ -1354,7 +1372,7 @@ void removeRoleGrants(String roleName) throws IOException { conn.flush(htab); } - // Finally, walk the table table + // Then, walk the table table puts.clear(); for (Database db : dbs) { List tables = scanTables(db.getName(), null); @@ -1378,6 +1396,27 @@ void removeRoleGrants(String roleName) throws IOException { htab.put(puts); conn.flush(htab); } + + // Finally, walk COLUMN_PRIVS_TABLE table + puts.clear(); + Iterator iter = scan(COLUMN_PRIVS_TABLE, CATALOG_CF, CATALOG_COL); + while (iter.hasNext()) { + Result result = iter.next(); + PrincipalPrivilegeSet privs = HBaseUtils.deserializePrincipalPrivilegeSet( + result.getValue(CATALOG_CF, CATALOG_COL)); + if (privs.getRolePrivileges() != null && privs.getRolePrivileges().get(roleName) != null) { + privs.getRolePrivileges().remove(roleName); + Put put = new Put(result.getRow()); + put.add(CATALOG_CF, CATALOG_COL, HBaseUtils.serializePrincipalPrivilegeSet(privs)); + puts.add(put); + } + } + + if (puts.size() > 0) { + HTableInterface htab = conn.getHBaseTable(COLUMN_PRIVS_TABLE); + htab.put(puts); + conn.flush(htab); + } } /** @@ -1501,6 +1540,113 @@ private void buildRoleCache() throws IOException { } } + void deleteColumnPrivilege(String dbName, String tableName, String partName) + throws IOException { + List deletes = new ArrayList(); + byte[] keyPrefix; + if (partName != null) { + keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName, partName); + } else { + keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName); + } + Iterator iter = + scan(COLUMN_PRIVS_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), + CATALOG_CF, CATALOG_COL, new KeyOnlyFilter()); + while (iter.hasNext()) { + Result result = iter.next(); + deletes.add(new Delete(result.getRow())); + } + if (deletes.size() > 0) { + HTableInterface htab = conn.getHBaseTable(COLUMN_PRIVS_TABLE); + htab.delete(deletes); + conn.flush(htab); + } + } + + List> scanColumnPrivilege(ColumnPrivilegeType type) + throws IOException { + List> ret = + new ArrayList>(); + // TODO: Push type filter to server side + Iterator iter = + scan(COLUMN_PRIVS_TABLE, CATALOG_CF, CATALOG_COL); + while (iter.hasNext()) { + Result result = iter.next(); + String[] columnSpec = HBaseUtils.deserializeKey(result.getRow()); + boolean includeInResult = false; + if (type == ColumnPrivilegeType.TABLE) { + if (columnSpec[2] == null || columnSpec[2].isEmpty()) { + includeInResult = true; + } + } else { // ColumnPrivilegeType.PARTITION + if (columnSpec[2] != null && !columnSpec[2].isEmpty()) { + includeInResult = true; + } + } + if (includeInResult) { + byte[] serialized = result.getValue(CATALOG_CF, CATALOG_COL); + if (serialized == null) continue; + PrincipalPrivilegeSet pps = HBaseUtils.deserializePrincipalPrivilegeSet(serialized); + ret.add(new ObjectPair(columnSpec, pps)); + } + } + return ret; + } + + Map scanColumnPrivilege(String dbName, + String tableName, String partName) + throws IOException { + Map ret = new HashMap(); + byte[] keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName, + partName!=null?partName:""); + Iterator iter = + scan(COLUMN_PRIVS_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), + CATALOG_CF, CATALOG_COL, null); + while (iter.hasNext()) { + Result result = iter.next(); + String[] columnSpec = 
HBaseUtils.deserializeKey(result.getRow()); + byte[] serialized = result.getValue(CATALOG_CF, CATALOG_COL); + if (serialized == null) continue; + PrincipalPrivilegeSet pps = HBaseUtils.deserializePrincipalPrivilegeSet(serialized); + ret.put(columnSpec[3], pps); + } + return ret; + } + + void putColumnPrivilege(String dbName, String tableName, String partName, + Map privsMap) throws IOException { + List puts = new ArrayList<>(); + for (Map.Entry entry : privsMap.entrySet()) { + byte[] key = HBaseUtils.buildKey(dbName, tableName, partName!=null?partName:"" + , entry.getKey()); + Put put = new Put(key); + put.add(CATALOG_CF, CATALOG_COL, HBaseUtils.serializePrincipalPrivilegeSet(entry.getValue())); + puts.add(put); + } + if (puts.size() > 0) { + HTableInterface htab = conn.getHBaseTable(COLUMN_PRIVS_TABLE); + htab.put(puts); + conn.flush(htab); + } + } + + PrincipalPrivilegeSet getColumnPrivilege(String dbName, String tableName, + String partitionName, String columnName) throws IOException { + byte[] key = HBaseUtils.buildKey(dbName, tableName, partitionName!=null?partitionName:"" + , columnName); + byte[] serialized = read(COLUMN_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + return HBaseUtils.deserializePrincipalPrivilegeSet(serialized); + } + + void putColumnPrivilege(String dbName, String tableName, + String partitionName, String columnName, PrincipalPrivilegeSet privs) throws IOException { + byte[] key = HBaseUtils.buildKey(dbName, tableName, partitionName!=null?partitionName:"" + , columnName); + store(COLUMN_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL, + HBaseUtils.serializePrincipalPrivilegeSet(privs)); + } + /********************************************************************************************** * Table related methods *********************************************************************************************/ @@ -1583,7 +1729,8 @@ Table getTable(String dbName, String tableName) throws IOException { } Filter filter = null; if (regex != null) { - filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); + filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator( + dbName + HBaseUtils.KEY_SEPARATOR + regex + "$")); } Iterator iter = scan(TABLE_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), @@ -1633,8 +1780,9 @@ void replaceTable(Table oldTable, Table newTable) throws IOException { byte[][] serialized = HBaseUtils.serializeTable(newTable, hash); store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); tableCache.put(new ObjectPair<>(newTable.getDbName(), newTable.getTableName()), newTable); - if (!oldTable.getTableName().equals(newTable.getTableName())) { - deleteTable(oldTable.getDbName(), oldTable.getTableName()); + if (!(oldTable.getDbName().equals(newTable.getDbName()) && + oldTable.getTableName().equals(newTable.getTableName()))) { + deleteTable(oldTable.getDbName(), oldTable.getTableName(), false); } } @@ -1872,6 +2020,9 @@ void replaceIndex(Index oldIndex, Index newIndex) throws IOException { * @throws IOException */ StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { + if (hash == null || hash.length == 0) { + return null; + } ByteArrayWrapper hashKey = new ByteArrayWrapper(hash); StorageDescriptor cached = sdCache.get(hashKey); if (cached != null) return cached; @@ -1893,6 +2044,9 @@ StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { * @throws IOException */ void 
decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException { + if (sd == null) { + return; + } byte[] key = HBaseUtils.hashStorageDescriptor(sd, md); byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); if (serializedRefCnt == null) { @@ -1926,6 +2080,9 @@ void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException * @return id of the entry in the cache, to be written in for the storage descriptor */ byte[] putStorageDescriptor(StorageDescriptor storageDescriptor) throws IOException { + if (storageDescriptor == null) { + return null; + } byte[] sd = HBaseUtils.serializeStorageDescriptor(storageDescriptor); byte[] key = HBaseUtils.hashStorageDescriptor(storageDescriptor, md); byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); @@ -2012,17 +2169,35 @@ public int hashCode() { * @param tableName table to update statistics for * @param partVals partition values that define partition to update statistics for. If this is * null, then these will be assumed to be table level statistics + * @param colNames list of columns inside stats * @param stats Stats object with stats for one or more columns * @throws IOException + * @throws MetaException */ - void updateStatistics(String dbName, String tableName, List partVals, - ColumnStatistics stats) throws IOException { + void updateStatistics(String dbName, String tableName, List partVals, List colNames, + ColumnStatistics stats) throws IOException, MetaException { byte[] key = getStatisticsKey(dbName, tableName, partVals); String hbaseTable = getStatisticsTable(partVals); + ColumnStatistics orig; + if (partVals == null) { + orig = getTableStatistics(dbName, tableName, colNames); + } else { + orig = getOnePartitionStatistics(dbName, tableName, partVals, colNames); + } + Map origColumnStatsMap = new HashMap(); + if (orig != null && orig.getStatsObj() != null) { + for (ColumnStatisticsObj obj : orig.getStatsObj()) { + origColumnStatsMap.put(obj.getColName(), obj); + } + } byte[][] colnames = new byte[stats.getStatsObjSize()][]; byte[][] serialized = new byte[stats.getStatsObjSize()][]; for (int i = 0; i < stats.getStatsObjSize(); i++) { ColumnStatisticsObj obj = stats.getStatsObj().get(i); + if (origColumnStatsMap.containsKey(obj.getColName())) { + obj = HBaseUtils.mergeColumnStatisticsForOneColumn( + origColumnStatsMap.get(obj.getColName()), obj); + } serialized[i] = HBaseUtils.serializeStatsForOneColumn(stats, obj); String colname = obj.getColName(); colnames[i] = HBaseUtils.buildKey(colname); @@ -2030,6 +2205,16 @@ void updateStatistics(String dbName, String tableName, List partVals, store(hbaseTable, key, STATS_CF, colnames, serialized); } + void deleteStatistics(String dbName, String tableName, List partVals, + String colName) throws IOException { + byte[] key = getStatisticsKey(dbName, tableName, partVals); + String hbaseTable = getStatisticsTable(partVals); + HTableInterface htab = conn.getHBaseTable(hbaseTable); + Delete d = new Delete(key); + d.addColumn(STATS_CF, HBaseUtils.buildKey(colName)); + htab.delete(d); + } + /** * Get statistics for a table * @@ -2067,6 +2252,40 @@ ColumnStatistics getTableStatistics(String dbName, String tblName, List return tableStats; } + ColumnStatistics getOnePartitionStatistics(String dbName, String tblName, + List partVals, List colNames) + throws IOException, MetaException { + String partName = Warehouse.makePartName(getTable(dbName, tblName).getPartitionKeys(), + partVals); + byte[] partKey = HBaseUtils.buildPartitionKey(dbName, 
tblName, + HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()), + partVals, HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); + ColumnStatistics partStats = new ColumnStatistics(); + ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); + statsDesc.setIsTblLevel(false); + statsDesc.setDbName(dbName); + statsDesc.setTableName(tblName); + statsDesc.setPartName(partName); + partStats.setStatsDesc(statsDesc); + byte[][] colKeys = new byte[colNames.size()][]; + for (int i = 0; i < colKeys.length; i++) { + colKeys[i] = HBaseUtils.buildKey(colNames.get(i)); + } + Result result = read(PART_TABLE, partKey, STATS_CF, colKeys); + for (int i = 0; i < colKeys.length; i++) { + byte[] serializedColStats = result.getValue(STATS_CF, colKeys[i]); + if (serializedColStats == null) { + // There were no stats for this column, so skip it + continue; + } + ColumnStatisticsObj obj = + HBaseUtils.deserializeStatsForOneColumn(partStats, serializedColStats); + obj.setColName(colNames.get(i)); + partStats.addToStatsObj(obj); + } + return partStats; + } + /** * Get statistics for a set of partitions * @@ -2099,7 +2318,7 @@ ColumnStatistics getTableStatistics(String dbName, String tblName, List valToPartMap.put(partVals.get(i), partNames.get(i)); byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()), - partVals.get(i)); + partVals.get(i), HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); Get get = new Get(partKey); for (byte[] colName : colNameBytes) { get.addColumn(STATS_CF, colName); @@ -2219,7 +2438,7 @@ void putAggregatedStats(byte[] key, String dbName, String tableName, List partVals) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index 9edf9bf..761b3a9 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -26,6 +26,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.FileMetadataHandler; @@ -72,6 +73,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; +import org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite.ColumnPrivilegeType; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; @@ -105,6 +107,7 @@ private Map fmHandlers; public HBaseStore() { + LOG.info("Using HBase Metastore"); } @Override @@ -313,6 +316,7 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, try { getHBase().deleteTable(HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tableName)); + getHBase().deleteColumnPrivilege(dbName, tableName, null); commit = true; return true; } catch (IOException e) { @@ -332,9 +336,13 @@ public Table getTable(String dbName, String tableName) throws MetaException { 
HiveStringUtils.normalizeIdentifier(tableName)); if (table == null) { LOG.debug("Unable to find table " + tableNameForErrorMsg(dbName, tableName)); + return table; } - commit = true; - return table; + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + // For backward compatibility, getPartition does not return privileges + sTable.unsetPrivileges(); + return sTable; } catch (IOException e) { LOG.error("Unable to get table", e); throw new MetaException("Error reading table " + e.getMessage()); @@ -348,10 +356,21 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE boolean commit = false; openTransaction(); try { + Table table = getHBase().getTable(part.getDbName(), part.getTableName()); Partition partCopy = part.deepCopy(); partCopy.setDbName(HiveStringUtils.normalizeIdentifier(part.getDbName())); partCopy.setTableName(HiveStringUtils.normalizeIdentifier(part.getTableName())); + if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { + partCopy.setPrivileges(table.getPrivileges().deepCopy()); + } getHBase().putPartition(partCopy); + if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { + // Copy table column priv to partition + Map privs = getHBase().scanColumnPrivilege(part.getDbName(), + part.getTableName(), null); + String partName = Warehouse.makePartName(table.getPartitionKeys(), partCopy.getValues()); + getHBase().putColumnPrivilege(part.getDbName(), part.getTableName(), partName, privs); + } commit = true; return true; } catch (IOException e) { @@ -368,12 +387,25 @@ public boolean addPartitions(String dbName, String tblName, List part boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tblName = HiveStringUtils.normalizeIdentifier(tblName); + Table table = getHBase().getTable(dbName, tblName); List partsCopy = new ArrayList(); for (int i=0;i privs = getHBase().scanColumnPrivilege( + partCopy.getDbName(), partCopy.getTableName(), null); + getHBase().putColumnPrivilege(partCopy.getDbName(), partCopy.getTableName(), partName, privs); + } } getHBase().putPartitions(partsCopy); commit = true; @@ -404,8 +436,12 @@ public Partition getPartition(String dbName, String tableName, List part throw new NoSuchObjectException("Unable to find partition " + partNameForErrorMsg(dbName, tableName, part_vals)); } + SharedPartition sPartition = new SharedPartition(); + sPartition.setShared(part); + // For backward compatibility, getPartition does not return privileges + sPartition.unsetPrivileges(); commit = true; - return part; + return sPartition; } catch (IOException e) { LOG.error("Unable to get partition", e); throw new MetaException("Error reading partition " + e.getMessage()); @@ -441,14 +477,17 @@ public boolean dropPartition(String dbName, String tableName, List part_ dbName = HiveStringUtils.normalizeIdentifier(dbName); tableName = HiveStringUtils.normalizeIdentifier(tableName); getHBase().deletePartition(dbName, tableName, HBaseUtils.getPartitionKeyTypes( - getTable(dbName, tableName).getPartitionKeys()), part_vals); + getHBase().getTable(dbName, tableName).getPartitionKeys()), part_vals); // Drop any cached stats that reference this partitions getHBase().getStatsCache().invalidate(dbName, tableName, buildExternalPartName(dbName, tableName, part_vals)); + Table table = getHBase().getTable(dbName, tableName); + String partName = Warehouse.makePartName(table.getPartitionKeys(), part_vals); + getHBase().deleteColumnPrivilege(dbName, 
tableName, partName); commit = true; return true; } catch (IOException e) { - LOG.error("Unable to delete db" + e); + LOG.error("Unable to drop partition" + e); throw new MetaException("Unable to drop partition " + partNameForErrorMsg(dbName, tableName, part_vals)); } finally { @@ -465,7 +504,7 @@ public boolean dropPartition(String dbName, String tableName, List part_ List parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tableName), max); commit = true; - return parts; + return HBaseUtils.getSharedPartitionsNoPriv(parts); } catch (IOException e) { LOG.error("Unable to get partitions", e); throw new MetaException("Error scanning partitions"); @@ -480,31 +519,37 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + Table oldTable = getHBase().getTable(dbName, tableName); Table newTableCopy = newTable.deepCopy(); newTableCopy.setDbName(HiveStringUtils.normalizeIdentifier(newTableCopy.getDbName())); - List oldPartTypes = getTable(dbName, tableName).getPartitionKeys()==null? - null:HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()); + List oldPartTypes = getHBase().getTable(dbName, tableName).getPartitionKeys()==null? + null:HBaseUtils.getPartitionKeyTypes(getHBase().getTable(dbName, tableName).getPartitionKeys()); newTableCopy.setTableName(HiveStringUtils.normalizeIdentifier(newTableCopy.getTableName())); - getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)), newTableCopy); - if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0 - && !tableName.equals(newTable.getTableName())) { - // They renamed the table, so we need to change each partition as well, since it changes - // the key. 
+ // alterTable is not supposed to change privileges + newTableCopy.setPrivileges(oldTable.getPrivileges()); + if (oldTable.getPartitionKeys() != null && newTable.getPartitionKeys() != null && + (!dbName.equals(newTable.getDbName()) // Change db + || !tableName.equals(newTable.getTableName()) // Rename table + || !newTable.getPartitionKeys().equals(oldTable.getPartitionKeys()))) // Change partition keys + { try { List oldParts = getPartitions(dbName, tableName, -1); List newParts = new ArrayList<>(oldParts.size()); for (Partition oldPart : oldParts) { Partition newPart = oldPart.deepCopy(); + newPart.setDbName(newTable.getDbName()); newPart.setTableName(newTable.getTableName()); newParts.add(newPart); } - getHBase().replacePartitions(oldParts, newParts, oldPartTypes); + getHBase().replacePartitions(oldParts, newParts, oldPartTypes, + HBaseUtils.getPartitionKeyTypes(newTable.getPartitionKeys())); } catch (NoSuchObjectException e) { LOG.debug("No partitions found for old table so not worrying about it"); } - } + getHBase().replaceTable(oldTable, newTableCopy); commit = true; } catch (IOException e) { LOG.error("Unable to alter table " + tableNameForErrorMsg(dbName, tableName), e); @@ -516,6 +561,9 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I @Override public List getTables(String dbName, String pattern) throws MetaException { + if (pattern == null || pattern.isEmpty()) { + return new ArrayList(); + } boolean commit = false; openTransaction(); try { @@ -578,7 +626,7 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I List
tables = getHBase().getTables(HiveStringUtils.normalizeIdentifier(dbname), normalizedTableNames); commit = true; - return tables; + return HBaseUtils.getSharedTablesNoPriv(tables); } catch (IOException e) { LOG.error("Unable to get tables ", e); throw new MetaException("Unable to get tables, " + e.getMessage()); @@ -589,7 +637,7 @@ public void alterTable(String dbName, String tableName, Table newTable) throws I @Override public List getAllTables(String dbName) throws MetaException { - return getTables(dbName, null); + return getTables(dbName, "*"); } @Override @@ -647,13 +695,26 @@ public void alterPartition(String db_name, String tbl_name, List part_va boolean commit = false; openTransaction(); try { + db_name = HiveStringUtils.normalizeIdentifier(db_name); + tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name); Partition new_partCopy = new_part.deepCopy(); new_partCopy.setDbName(HiveStringUtils.normalizeIdentifier(new_partCopy.getDbName())); new_partCopy.setTableName(HiveStringUtils.normalizeIdentifier(new_partCopy.getTableName())); - Partition oldPart = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), part_vals); + Partition oldPart = getHBase().getPartition(db_name, tbl_name, part_vals); + // alter partition don't suppose to change privileges + new_partCopy.setPrivileges(oldPart.getPrivileges()); getHBase().replacePartition(oldPart, new_partCopy, HBaseUtils.getPartitionKeyTypes( - getTable(db_name, tbl_name).getPartitionKeys())); + getHBase().getTable(db_name, tbl_name).getPartitionKeys())); + // Update column stats if needed + if (!(db_name.equals(new_partCopy.getDbName()) && + tbl_name.equals(new_partCopy.getTableName()) && + part_vals.equals(new_partCopy.getValues()))) { + Map privs = getHBase().scanColumnPrivilege(db_name, tbl_name, + Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), part_vals)); + getHBase().putColumnPrivilege(db_name, tbl_name, + Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), + new_partCopy.getValues()), privs); + } // Drop any cached stats that reference this partitions getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), HiveStringUtils.normalizeIdentifier(tbl_name), @@ -674,19 +735,32 @@ public void alterPartitions(String db_name, String tbl_name, List> boolean commit = false; openTransaction(); try { + db_name = HiveStringUtils.normalizeIdentifier(db_name); + tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name); List new_partsCopy = new ArrayList(); + List oldParts = getHBase().getPartitions(db_name, tbl_name, + HBaseUtils.getPartitionKeyTypes(getHBase().getTable(db_name, tbl_name).getPartitionKeys()), + part_vals_list); for (int i=0;i oldParts = getHBase().getPartitions(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), - HBaseUtils.getPartitionKeyTypes(getTable(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name)).getPartitionKeys()), part_vals_list); - getHBase().replacePartitions(oldParts, new_partsCopy, HBaseUtils.getPartitionKeyTypes( - getTable(db_name, tbl_name).getPartitionKeys())); + List partKeyTypes = HBaseUtils.getPartitionKeyTypes( + getHBase().getTable(db_name, tbl_name).getPartitionKeys()); + getHBase().replacePartitions(oldParts, new_partsCopy, partKeyTypes, partKeyTypes); + // Update column stats if needed + for (int i=0;i privs = getHBase().scanColumnPrivilege(db_name, tbl_name, + 
Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), oldPart.getValues())); + getHBase().putColumnPrivilege(db_name, tbl_name, + Warehouse.makePartName(getHBase().getTable(db_name, tbl_name).getPartitionKeys(), newPart.getValues()), privs); + } for (List part_vals : part_vals_list) { getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), HiveStringUtils.normalizeIdentifier(tbl_name), @@ -838,7 +912,8 @@ public void alterIndex(String dbname, String baseTblName, String name, Index new openTransaction(); try { getPartitionsByExprInternal(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result); + HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result, + HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME)); commit = true; return result; } finally { @@ -853,10 +928,11 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); dbName = HiveStringUtils.normalizeIdentifier(dbName); tblName = HiveStringUtils.normalizeIdentifier(tblName); - Table table = getTable(dbName, tblName); boolean commit = false; - openTransaction(); try { + Table table = getHBase().getTable(dbName, tblName); + openTransaction(); + boolean hasUnknownPartitions; if (exprTree == null) { List partNames = new LinkedList(); @@ -864,10 +940,13 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, table, expr, defaultPartitionName, maxParts, partNames); result.addAll(getPartitionsByNames(dbName, tblName, partNames)); } else { - hasUnknownPartitions = getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result); + hasUnknownPartitions = getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, + result, defaultPartitionName); } commit = true; return hasUnknownPartitions; + } catch (IOException e) { + throw new TException(e); } finally { commitOrRoleBack(commit); } @@ -919,30 +998,37 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, } private boolean getPartitionsByExprInternal(String dbName, String tblName, - ExpressionTree exprTree, short maxParts, List result) throws MetaException, + ExpressionTree exprTree, short maxParts, List result, + String defaultPartitionName) throws MetaException, NoSuchObjectException { dbName = HiveStringUtils.normalizeIdentifier(dbName); tblName = HiveStringUtils.normalizeIdentifier(tblName); - Table table = getTable(dbName, tblName); - if (table == null) { - throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName); - } - // general hbase filter plan from expression tree - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys()); - if (LOG.isDebugEnabled()) { - LOG.debug("Hbase Filter Plan generated : " + planRes.plan); - } + try { + Table table = getHBase().getTable(dbName, tblName); + if (table == null) { + throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName); + } + // general hbase filter plan from expression tree + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys(), + defaultPartitionName); + if (LOG.isDebugEnabled()) { + LOG.debug("Hbase Filter Plan generated : " + planRes.plan); + } - // results from scans need to be merged as there can be overlapping results between - // the scans. 
Use a map of list of partition values to partition for this. - Map, Partition> mergedParts = new HashMap, Partition>(); - for (ScanPlan splan : planRes.plan.getPlans()) { - try { - List parts = getHBase().scanPartitions(dbName, tblName, - splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys()), - splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys()), - splan.getFilter(table.getPartitionKeys()), -1); + // results from scans need to be merged as there can be overlapping results between + // the scans. Use a map of list of partition values to partition for this. + Map, Partition> mergedParts = new HashMap, Partition>(); + for (ScanPlan splan : planRes.plan.getPlans()) { + List parts; + if (splan.hasUnknown()) { + parts = getHBase().scanPartitionsInTable(dbName, tblName, -1); + } else { + parts = getHBase().scanPartitions(dbName, tblName, + splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys(), defaultPartitionName), + splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys(), defaultPartitionName), + splan.getFilter(table.getPartitionKeys()), -1); + } boolean reachedMax = false; for (Partition part : parts) { mergedParts.put(part.getValues(), part); @@ -954,33 +1040,38 @@ private boolean getPartitionsByExprInternal(String dbName, String tblName, if (reachedMax) { break; } - } catch (IOException e) { - LOG.error("Unable to get partitions", e); - throw new MetaException("Error scanning partitions" + tableNameForErrorMsg(dbName, tblName) - + ": " + e); } - } - for (Entry, Partition> mp : mergedParts.entrySet()) { - result.add(mp.getValue()); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Matched partitions " + result); - } + for (Entry, Partition> mp : mergedParts.entrySet()) { + result.add(mp.getValue()); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Matched partitions " + result); + } - // return true if there might be some additional partitions that don't match filter conditions - // being returned - return !planRes.hasUnsupportedCondition; + // return true if there might be some additional partitions that don't match filter conditions + // being returned + return planRes.hasUnsupportedCondition; + } catch (IOException e) { + LOG.error("Unable to get partitions", e); + throw new MetaException("Error scanning partitions" + tableNameForErrorMsg(dbName, tblName) + + ": " + e); + } } @Override public List getPartitionsByNames(String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - List parts = new ArrayList(); - for (String partName : partNames) { - parts.add(getPartition(dbName, tblName, partNameToVals(partName))); + try { + List parts = new ArrayList(); + for (String partName : partNames) { + parts.add(getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName))); + } + return parts; + } catch (IOException e) { + throw new MetaException("Failed to list part names, " + e.getMessage()); } - return parts; } @Override @@ -1166,6 +1257,15 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List g } } + if (global.getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = global.getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + if (global.getRolePrivileges() != null) { List roles = getHBase().getUserRoles(userName); if (roles != null) { @@ -1206,6 +1306,15 @@ public PrincipalPrivilegeSet 
getDBPrivilegeSet(String dbName, String userName, } } + if (db.getPrivileges().getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = db.getPrivileges().getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + if (db.getPrivileges().getRolePrivileges() != null) { List roles = getHBase().getUserRoles(userName); if (roles != null) { @@ -1235,6 +1344,8 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableNam boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); Table table = getHBase().getTable(dbName, tableName); List pgi; @@ -1246,6 +1357,15 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableNam } } + if (table.getPrivileges().getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = table.getPrivileges().getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + if (table.getPrivileges().getRolePrivileges() != null) { List roles = getHBase().getUserRoles(userName); if (roles != null) { @@ -1273,8 +1393,50 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tabl String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { - // We don't support partition privileges - return null; + boolean commit = false; + openTransaction(); + try { + PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); + List partVals = Warehouse.getPartValuesFromPartName(partition); + Partition part = getHBase().getPartition(dbName, tableName, partVals); + List pgi; + if (part.getPrivileges() != null) { + if (part.getPrivileges().getUserPrivileges() != null) { + pgi = part.getPrivileges().getUserPrivileges().get(userName); + if (pgi != null) { + pps.putToUserPrivileges(userName, pgi); + } + } + + if (part.getPrivileges().getGroupPrivileges() != null && groupNames != null && groupNames.size() > 0) { + for (String groupName : groupNames) { + pgi = part.getPrivileges().getGroupPrivileges().get(groupName); + if (pgi != null) { + pps.putToGroupPrivileges(groupName, pgi); + } + } + } + + if (part.getPrivileges().getRolePrivileges() != null) { + List roles = getHBase().getUserRoles(userName); + if (roles != null) { + for (String role : roles) { + pgi = part.getPrivileges().getRolePrivileges().get(role); + if (pgi != null) { + pps.putToRolePrivileges(role, pgi); + } + } + } + } + } + commit = true; + return pps; + } catch (IOException e) { + LOG.error("Unable to get partition privileges for user", e); + throw new MetaException("Unable to get partition privileges for user, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } } @Override @@ -1283,8 +1445,25 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa String userName, List groupNames) throws InvalidObjectException, MetaException { - // We don't support column level privileges - return null; + tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = HiveStringUtils.normalizeIdentifier(dbName); + columnName = HiveStringUtils.normalizeIdentifier(columnName); + + boolean commited = false; + try { + openTransaction(); + PrincipalPrivilegeSet ret = 
getHBase().getColumnPrivilege(dbName, tableName, + partitionName, columnName); + commited = commitTransaction(); + return ret; + } catch (IOException e) { + LOG.error("Unable to get column privileges for user", e); + throw new MetaException("Unable to get column privileges for user, " + e.getMessage()); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -1303,6 +1482,10 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa map = pps.getUserPrivileges(); break; + case GROUP: + map = pps.getGroupPrivileges(); + break; + case ROLE: map = pps.getRolePrivileges(); break; @@ -1347,6 +1530,10 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa map = pps.getUserPrivileges(); break; + case GROUP: + map = pps.getGroupPrivileges(); + break; + case ROLE: map = pps.getRolePrivileges(); break; @@ -1392,6 +1579,10 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa map = pps.getUserPrivileges(); break; + case GROUP: + map = pps.getGroupPrivileges(); + break; + case ROLE: map = pps.getRolePrivileges(); break; @@ -1424,8 +1615,89 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa String tableName, List partValues, String partName) { - // We don't support partition grants - return new ArrayList(); + List grants; + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + Partition partition = getHBase().getPartition(dbName, tableName, partValues); + if (partition == null) return privileges; + PrincipalPrivilegeSet pps = partition.getPrivileges(); + if (pps == null) return privileges; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case GROUP: + map = pps.getGroupPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) return privileges; + grants = map.get(principalName); + + if (grants == null || grants.size() == 0) return privileges; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partValues, null), principalName, principalType, pgi)); + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + private List listPrincipalColumnGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, List partVals, + String columnName) throws IOException, MetaException { + List grants; + List privileges = new ArrayList(); + String partitionName = null; + if (partVals != null) { + Table table = getHBase().getTable(dbName, tableName); + partitionName = Warehouse.makePartName(table.getPartitionKeys(), partVals); + } + PrincipalPrivilegeSet pps = getHBase().getColumnPrivilege(dbName, tableName, partitionName, columnName); + if (pps == null) return privileges; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case GROUP: + map = pps.getGroupPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) return privileges; + grants = map.get(principalName); + + if (grants == null || grants.size() == 0) return 
privileges; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.COLUMN, dbName, + tableName, partVals, columnName), principalName, principalType, pgi)); + } + return privileges; } @Override @@ -1433,8 +1705,18 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa PrincipalType principalType, String dbName, String tableName, String columnName) { - // We don't support column grants - return new ArrayList(); + boolean commit = false; + openTransaction(); + try { + return listPrincipalColumnGrants(principalName, principalType, dbName, tableName, + null, columnName); + } catch (MetaException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override @@ -1445,8 +1727,18 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa List partVals, String partName, String columnName) { - // We don't support column grants - return new ArrayList(); + boolean commit = false; + openTransaction(); + try { + return listPrincipalColumnGrants(principalName, principalType, dbName, tableName, + partVals, columnName); + } catch (MetaException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override @@ -1472,6 +1764,9 @@ public boolean grantPrivileges(PrivilegeBag privileges) } commit = true; return true; + } catch (IOException e) { + LOG.error("Unable to grant privilege", e); + throw new MetaException("Error grant privilege " + e.getMessage()); } finally { commitOrRoleBack(commit); } @@ -1498,6 +1793,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) th } commit = true; return true; + } catch (IOException e) { + LOG.error("Unable to revoke privilege", e); + throw new MetaException("Error revoke privilege " + e.getMessage()); } finally { commitOrRoleBack(commit); } @@ -1506,13 +1804,21 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) th private static class PrivilegeInfo { Database db; Table table; + Partition partition; + static class ColumnInfo { + String dbName; + String tableName; + String partitionName; + String columnName; + } + ColumnInfo columnInfo; List grants; String typeErrMsg; PrincipalPrivilegeSet privSet; } private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege) - throws MetaException, NoSuchObjectException, InvalidObjectException { + throws MetaException, NoSuchObjectException, InvalidObjectException, IOException { PrivilegeInfo result = new PrivilegeInfo(); switch (privilege.getHiveObject().getObjectType()) { case GLOBAL: @@ -1526,25 +1832,49 @@ private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege break; case DATABASE: - result.db = getDatabase(privilege.getHiveObject().getDbName()); + result.db = getHBase().getDb(privilege.getHiveObject().getDbName()); result.typeErrMsg = "database " + result.db.getName(); result.privSet = createOnNull(result.db.getPrivileges()); break; case TABLE: - result.table = getTable(privilege.getHiveObject().getDbName(), + result.table = getHBase().getTable(privilege.getHiveObject().getDbName(), privilege.getHiveObject().getObjectName()); - result.typeErrMsg = "table " + result.table.getTableName(); + result.typeErrMsg = "table " + result.table.getDbName() + "." 
+ + result.table.getTableName(); result.privSet = createOnNull(result.table.getPrivileges()); break; case PARTITION: + result.partition = getHBase().getPartition(privilege.getHiveObject().getDbName(), + privilege.getHiveObject().getObjectName(), privilege.getHiveObject().getPartValues()); + result.typeErrMsg = "partition " + result.partition.getDbName() + "." + + result.partition.getTableName() + "." + result.partition.getValues(); + result.privSet = createOnNull(result.partition.getPrivileges()); + break; + case COLUMN: - throw new RuntimeException("HBase metastore does not support partition or column " + - "permissions"); + result.columnInfo = new PrivilegeInfo.ColumnInfo(); + result.columnInfo.dbName = privilege.getHiveObject().getDbName(); + result.columnInfo.tableName = privilege.getHiveObject().getObjectName(); + result.columnInfo.columnName = privilege.getHiveObject().getColumnName(); + if (privilege.getHiveObject().getPartValues() != null) { + Table table = getHBase().getTable(privilege.getHiveObject().getDbName(), + privilege.getHiveObject().getObjectName()); + result.columnInfo.partitionName = Warehouse.makePartName(table.getPartitionKeys(), + privilege.getHiveObject().getPartValues()); + } + result.typeErrMsg = "column " + result.columnInfo.dbName + "." + + result.columnInfo.tableName + "." + + (result.columnInfo.partitionName!=null?result.columnInfo.partitionName+".":"") + + result.columnInfo.columnName; + result.privSet = createOnNull(getHBase().getColumnPrivilege(result.columnInfo.dbName, + result.columnInfo.tableName, result.columnInfo.partitionName, + result.columnInfo.columnName)); + break; default: - throw new RuntimeException("Woah bad, unknown object type " + + throw new RuntimeException("Unknown object type " + privilege.getHiveObject().getObjectType()); } @@ -1553,19 +1883,21 @@ private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege switch (privilege.getPrincipalType()) { case USER: grantInfos = result.privSet.getUserPrivileges(); - result.typeErrMsg = "user"; + result.typeErrMsg += ",user " + privilege.getPrincipalType(); break; case GROUP: - throw new RuntimeException("HBase metastore does not support group permissions"); + grantInfos = result.privSet.getGroupPrivileges(); + result.typeErrMsg += "group " + privilege.getPrincipalType(); + break; case ROLE: grantInfos = result.privSet.getRolePrivileges(); - result.typeErrMsg = "role"; + result.typeErrMsg += "role " + privilege.getPrincipalType(); break; default: - throw new RuntimeException("Woah bad, unknown principal type " + + throw new RuntimeException("Unknown principal type " + privilege.getPrincipalType()); } @@ -1587,6 +1919,9 @@ private PrincipalPrivilegeSet createOnNull(PrincipalPrivilegeSet pps) { if (pps.getUserPrivileges() == null) { pps.setUserPrivileges(new HashMap>()); } + if (pps.getGroupPrivileges() == null) { + pps.setGroupPrivileges(new HashMap>()); + } if (pps.getRolePrivileges() == null) { pps.setRolePrivileges(new HashMap>()); } @@ -1594,7 +1929,7 @@ private PrincipalPrivilegeSet createOnNull(PrincipalPrivilegeSet pps) { } private void writeBackGrantOrRevoke(HiveObjectPrivilege priv, PrivilegeInfo pi) - throws MetaException, NoSuchObjectException, InvalidObjectException { + throws MetaException, NoSuchObjectException, InvalidObjectException, IOException { // Now write it back switch (priv.getHiveObject().getObjectType()) { case GLOBAL: @@ -1616,8 +1951,19 @@ private void writeBackGrantOrRevoke(HiveObjectPrivilege priv, PrivilegeInfo pi) alterTable(pi.table.getDbName(), 
pi.table.getTableName(), pi.table); break; + case PARTITION: + pi.partition.setPrivileges(pi.privSet); + alterPartition(pi.partition.getDbName(), pi.partition.getTableName(), + pi.partition.getValues(), pi.partition); + break; + + case COLUMN: + getHBase().putColumnPrivilege(pi.columnInfo.dbName, pi.columnInfo.tableName, + pi.columnInfo.partitionName, pi.columnInfo.columnName, pi.privSet); + break; + default: - throw new RuntimeException("Dude, you missed the second switch!"); + throw new RuntimeException("Unknown privilege type " + priv.getHiveObject().getObjectType()); } } @@ -1688,6 +2034,10 @@ public Role getRole(String roleName) throws NoSuchObjectException { List roles = listRoles(principalName, principalType); List rpgs = new ArrayList(roles.size()); for (Role role : roles) { + if (role.getRoleName().equals(HiveMetaStore.PUBLIC)) { + rpgs.add(new RolePrincipalGrant(HiveMetaStore.PUBLIC, principalName, PrincipalType.USER, + false, 0, null, null)); + } HbaseMetastoreProto.RoleGrantInfoList grants = getHBase().getRolePrincipals(role.getRoleName()); if (grants != null) { for (HbaseMetastoreProto.RoleGrantInfo grant : grants.getGrantInfoList()) { @@ -1735,8 +2085,11 @@ public Role getRole(String roleName) throws NoSuchObjectException { public Partition getPartitionWithAuth(String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException { - // We don't do authorization checks for partitions. - return getPartition(dbName, tblName, partVals); + try { + return getHBase().getPartition(dbName, tblName, partVals); + } catch (IOException e) { + throw new MetaException("Cannot get partiton, " + e.getMessage()); + } } @Override @@ -1751,14 +2104,18 @@ public Partition getPartitionWithAuth(String dbName, String tblName, List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { - List parts = - listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null); - List partNames = new ArrayList(parts.size()); - for (Partition part : parts) { - partNames.add(buildExternalPartName(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), part.getValues())); + try { + List parts = + listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null); + List partNames = new ArrayList(parts.size()); + for (Partition part : parts) { + partNames.add(buildExternalPartName(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), part.getValues())); + } + return partNames; + } catch (IOException e) { + throw new MetaException("Failed to list part names, " + e.getMessage()); } - return partNames; } @@ -1797,13 +2154,13 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws } String dbName = colStats.getStatsDesc().getDbName(); String tableName = colStats.getStatsDesc().getTableName(); - Table newTable = getTable(dbName, tableName); + Table newTable = getHBase().getTable(dbName, tableName); Table newTableCopy = newTable.deepCopy(); StatsSetupConst.setColumnStatsState(newTableCopy.getParameters(), colNames); getHBase().replaceTable(newTable, newTableCopy); getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), null, colStats); + colStats.getStatsDesc().getTableName(), null, colNames, colStats); commit = true; return true; @@ -1834,10 +2191,10 @@ public boolean 
updatePartitionColumnStatistics(ColumnStatistics colStats, } StatsSetupConst.setColumnStatsState(new_partCopy.getParameters(), colNames); getHBase().replacePartition(oldPart, new_partCopy, - HBaseUtils.getPartitionKeyTypes(getTable(db_name, tbl_name).getPartitionKeys())); + HBaseUtils.getPartitionKeyTypes(getHBase().getTable(db_name, tbl_name).getPartitionKeys())); getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), partVals, colStats); + colStats.getStatsDesc().getTableName(), partVals, colNames, colStats); // We need to invalidate aggregates that include this partition getHBase().getStatsCache().invalidate(colStats.getStatsDesc().getDbName(), colStats.getStatsDesc().getTableName(), colStats.getStatsDesc().getPartName()); @@ -1896,15 +2253,35 @@ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - // NOP, stats will be deleted along with the partition when it is dropped. - return true; + boolean commit = false; + openTransaction(); + try { + getHBase().deleteStatistics(dbName, tableName, partVals, colName); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete column statistics", e); + throw new MetaException("Failed to delete column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } } @Override public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - // NOP, stats will be deleted along with the table when it is dropped. 
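// The statistics delete below follows the open/commit/rollback transaction idiom this patch uses
// throughout HBaseStore; a minimal skeleton of that idiom (illustrative only, not code from the
// patch, with placeholder messages and result value):
//
//   boolean commit = false;
//   openTransaction();
//   try {
//     // ... read or write through getHBase() ...
//     commit = true;
//     return result;
//   } catch (IOException e) {
//     LOG.error("Unable to ...", e);
//     throw new MetaException("Failed to ..., " + e.getMessage());
//   } finally {
//     commitOrRoleBack(commit);   // commits only if the flag was set, otherwise rolls back
//   }
//
// Because only the commit flag decides the outcome, any exception thrown before the flag is set
// ends in a rollback.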
- return true; + boolean commit = false; + openTransaction(); + try { + getHBase().deleteStatistics(dbName, tableName, null, colName); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete column statistics", e); + throw new MetaException("Failed to delete column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } } /** @@ -2128,32 +2505,10 @@ public void dropPartitions(String dbName, String tblName, List partNames try { List dbs = getHBase().scanDatabases(null); for (Database db : dbs) { - List grants; - PrincipalPrivilegeSet pps = db.getPrivileges(); if (pps == null) continue; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - - if (map == null) continue; - grants = map.get(principalName); - if (grants == null || grants.size() == 0) continue; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, - db.getName(), null, null, null), principalName, principalType, pgi)); - } + privileges.addAll(convertToHiveObjectPrivileges(pps, principalName, principalType, + HiveObjectType.DATABASE, db.getName(), null, null, null)); } commit = true; return privileges; @@ -2164,6 +2519,70 @@ public void dropPartitions(String dbName, String tblName, List partNames } } + private List convertToHiveObjectPrivileges(PrincipalPrivilegeSet pps, + String principalName, PrincipalType principalType, HiveObjectType objectType, String dbName, + String tableName, List partVals, String columnName) { + List privileges = new ArrayList(); + List grants; + Map> map; + if (principalName != null && principalType != null) { + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case GROUP: + map = pps.getGroupPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) { + return privileges; + } + grants = map.get(principalName); + if (grants == null || grants.size() == 0) { + return privileges; + } + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(objectType, + dbName, tableName, partVals, columnName), principalName, principalType, pgi)); + } + } else { + map = new HashMap>(); + if (pps.getUserPrivileges() != null) { + addPrivilegesMap(pps.getUserPrivileges(), PrincipalType.USER, HiveObjectType.DATABASE, + dbName, tableName, partVals, columnName, privileges); + } + if (pps.getGroupPrivileges() != null) { + addPrivilegesMap(pps.getGroupPrivileges(), PrincipalType.GROUP, HiveObjectType.DATABASE, + dbName, tableName, partVals, columnName, privileges); + } + if (pps.getRolePrivileges() != null) { + addPrivilegesMap(pps.getRolePrivileges(), PrincipalType.ROLE, HiveObjectType.DATABASE, + dbName, tableName, partVals, columnName, privileges); + } + } + return privileges; + } + + private void addPrivilegesMap(Map> m, PrincipalType principalType, + HiveObjectType objectType, String dbName, String tableName, List partVals, + String columnName, List privileges) { + for (Map.Entry> entry : m.entrySet()) { + for (PrivilegeGrantInfo pgi : entry.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(objectType, + dbName, tableName, partVals, 
columnName), entry.getKey(), principalType, pgi)); + } + } + } + @Override public List listPrincipalTableGrantsAll(String principalName, PrincipalType principalType) { @@ -2177,29 +2596,8 @@ public void dropPartitions(String dbName, String tblName, List partNames PrincipalPrivilegeSet pps = table.getPrivileges(); if (pps == null) continue; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - - if (map == null) continue; - grants = map.get(principalName); - if (grants == null || grants.size() == 0) continue; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, - table.getDbName(), table.getTableName(), null, null), principalName, principalType, - pgi)); - } + privileges.addAll(convertToHiveObjectPrivileges(pps, principalName, principalType, + HiveObjectType.TABLE, table.getDbName(), table.getTableName(), null, null)); } commit = true; return privileges; @@ -2213,19 +2611,82 @@ public void dropPartitions(String dbName, String tblName, List partNames @Override public List listPrincipalPartitionGrantsAll(String principalName, PrincipalType principalType) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + List
tables = getHBase().scanTables(null, null); + for (Table table : tables) { + List partitions = getHBase().scanPartitionsInTable(table.getDbName(), + table.getTableName(), Integer.MAX_VALUE); + + for (Partition partition : partitions) { + PrincipalPrivilegeSet pps = partition.getPrivileges(); + if (pps == null) continue; + privileges.addAll(convertToHiveObjectPrivileges(pps, principalName, principalType, + HiveObjectType.PARTITION, table.getDbName(), table.getTableName(), + partition.getValues(), null)); + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override public List listPrincipalTableColumnGrantsAll(String principalName, PrincipalType principalType) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + List> columnPps = + getHBase().scanColumnPrivilege(ColumnPrivilegeType.TABLE); + for (ObjectPair pair : columnPps) { + privileges.addAll(convertToHiveObjectPrivileges(pair.getSecond(), principalName, + principalType, HiveObjectType.COLUMN, pair.getFirst()[0], pair.getFirst()[1], + null, pair.getFirst()[3])); + } + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override public List listPrincipalPartitionColumnGrantsAll(String principalName, PrincipalType principalType) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + List> columnPps = + getHBase().scanColumnPrivilege(ColumnPrivilegeType.PARTITION); + for (ObjectPair pair : columnPps) { + privileges.addAll(convertToHiveObjectPrivileges(pair.getSecond(), principalName, + principalType, HiveObjectType.COLUMN, pair.getFirst()[0], pair.getFirst()[1], + Warehouse.getPartValuesFromPartName(pair.getFirst()[2]), pair.getFirst()[3])); + } + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } catch (MetaException e) { + throw new RuntimeException(e); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -2236,16 +2697,28 @@ public void dropPartitions(String dbName, String tblName, List partNames try { PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs(); if (pps != null) { - for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, - null, null, null), e.getKey(), PrincipalType.USER, pgi)); + if (pps.getUserPrivileges() != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.USER, pgi)); + } } } - for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, - null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + if (pps.getGroupPrivileges() != null) { + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.GROUP, pgi)); + } 
+ } + } + if (pps.getRolePrivileges() != null) { + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + } } } } @@ -2273,6 +2746,12 @@ public void dropPartitions(String dbName, String tblName, List partNames null, null, null), e.getKey(), PrincipalType.USER, pgi)); } } + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, + null, null, null), e.getKey(), PrincipalType.GROUP, pgi)); + } + } for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { for (PrivilegeGrantInfo pgi : e.getValue()) { privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, @@ -2293,7 +2772,31 @@ public void dropPartitions(String dbName, String tblName, List partNames public List listPartitionColumnGrantsAll(String dbName, String tableName, String partitionName, String columnName) { - return new ArrayList(); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = HiveStringUtils.normalizeIdentifier(dbName); + columnName = HiveStringUtils.normalizeIdentifier(columnName); + + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + PrincipalPrivilegeSet pps = getHBase().getColumnPrivilege(dbName, tableName, + partitionName, columnName); + privileges.addAll(convertToHiveObjectPrivileges(pps, null, null, + HiveObjectType.COLUMN, dbName, tableName, + Warehouse.getPartValuesFromPartName(partitionName), columnName)); + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } catch (MetaException e) { + throw new RuntimeException(e); + } + finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -2302,19 +2805,33 @@ public void dropPartitions(String dbName, String tblName, List partNames boolean commit = false; openTransaction(); try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); Table table = getHBase().getTable(dbName, tableName); PrincipalPrivilegeSet pps = table.getPrivileges(); if (pps != null) { - for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), e.getKey(), PrincipalType.USER, pgi)); + if (pps.getUserPrivileges() != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.USER, pgi)); + } } } - for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + if (pps.getGroupPrivileges() != null) { + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.GROUP, pgi)); + } + } + } + if 
(pps.getRolePrivileges() != null) { + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + } } } } @@ -2330,13 +2847,74 @@ public void dropPartitions(String dbName, String tblName, List partNames @Override public List listPartitionGrantsAll(String dbName, String tableName, String partitionName) { - return new ArrayList(); + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + Table table = getHBase().getTable(dbName, tableName); + List partVals = Warehouse.getPartValuesFromPartName(partitionName); + List partitions = getHBase().scanPartitionsInTable(table.getDbName(), + table.getTableName(), Integer.MAX_VALUE); + for (Partition partition : partitions) { + PrincipalPrivilegeSet pps = partition.getPrivileges(); + if (pps != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partVals, null), e.getKey(), PrincipalType.USER, pgi)); + } + } + for (Map.Entry> e : pps.getGroupPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partVals, null), e.getKey(), PrincipalType.GROUP, pgi)); + } + } + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.PARTITION, dbName, + tableName, partVals, null), e.getKey(), PrincipalType.ROLE, pgi)); + } + } + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } catch (MetaException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } } @Override public List listTableColumnGrantsAll(String dbName, String tableName, String columnName) { - return new ArrayList(); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = HiveStringUtils.normalizeIdentifier(dbName); + columnName = HiveStringUtils.normalizeIdentifier(columnName); + + List privileges = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + PrincipalPrivilegeSet pps = getHBase().getColumnPrivilege(dbName, tableName, + null, columnName); + privileges.addAll(convertToHiveObjectPrivileges(pps, null, null, + HiveObjectType.COLUMN, dbName, tableName, null, columnName)); + commited = commitTransaction(); + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + if (!commited) { + rollbackTransaction(); + } + } } @Override @@ -2438,22 +3016,20 @@ public Function getFunction(String dbName, String funcName) throws MetaException @Override public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { - throw new UnsupportedOperationException(); + return new NotificationEventResponse(); } @Override public void addNotificationEvent(NotificationEvent event) { - throw new UnsupportedOperationException(); } @Override public void cleanNotificationEvents(int olderThan) { - throw new UnsupportedOperationException(); } @Override public CurrentNotificationEventId 
getCurrentNotificationEventId() { - throw new UnsupportedOperationException(); + return new CurrentNotificationEventId(0); } @Override @@ -2528,8 +3104,8 @@ private String buildExternalPartName(Table table, Partition part) { } private String buildExternalPartName(String dbName, String tableName, List partVals) - throws MetaException { - return buildExternalPartName(getTable(dbName, tableName), partVals); + throws MetaException, IOException { + return buildExternalPartName(getHBase().getTable(dbName, tableName), partVals); } private Set findUsersToRemapRolesFor(Role role, String principalName, PrincipalType type) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java index 54daa4a..3438133 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java @@ -38,6 +38,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; @@ -45,6 +46,8 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Date; +import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; import org.apache.hadoop.hive.metastore.api.Decimal; import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; @@ -179,6 +182,7 @@ static HbaseMetastoreProto.PrincipalType convertPrincipalTypes(PrincipalType type) { switch (type) { case USER: return HbaseMetastoreProto.PrincipalType.USER; + case GROUP: return HbaseMetastoreProto.PrincipalType.GROUP; case ROLE: return HbaseMetastoreProto.PrincipalType.ROLE; default: throw new RuntimeException("Unknown principal type " + type.toString()); } @@ -192,6 +196,7 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType type) { switch (type) { case USER: return PrincipalType.USER; + case GROUP: return PrincipalType.GROUP; case ROLE: return PrincipalType.ROLE; default: throw new RuntimeException("Unknown principal type " + type.toString()); } @@ -230,6 +235,9 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType typ if (pps.getUserPrivileges() != null) { builder.addAllUsers(buildPrincipalPrivilegeSetEntry(pps.getUserPrivileges())); } + if (pps.getGroupPrivileges() != null) { + builder.addAllGroups(buildPrincipalPrivilegeSetEntry(pps.getGroupPrivileges())); + } if (pps.getRolePrivileges() != null) { builder.addAllRoles(buildPrincipalPrivilegeSetEntry(pps.getRolePrivileges())); } @@ -239,11 +247,15 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType typ private static PrincipalPrivilegeSet buildPrincipalPrivilegeSet( HbaseMetastoreProto.PrincipalPrivilegeSet proto) throws InvalidProtocolBufferException { PrincipalPrivilegeSet pps = null; - if (!proto.getUsersList().isEmpty() || !proto.getRolesList().isEmpty()) { + if (!proto.getUsersList().isEmpty() || !proto.getGroupsList().isEmpty() + || !proto.getRolesList().isEmpty()) { pps = new 
PrincipalPrivilegeSet(); if (!proto.getUsersList().isEmpty()) { pps.setUserPrivileges(convertPrincipalPrivilegeSetEntries(proto.getUsersList())); } + if (!proto.getGroupsList().isEmpty()) { + pps.setGroupPrivileges(convertPrincipalPrivilegeSetEntries(proto.getGroupsList())); + } if (!proto.getRolesList().isEmpty()) { pps.setRolePrivileges(convertPrincipalPrivilegeSetEntries(proto.getRolesList())); } @@ -649,11 +661,11 @@ private static ResourceType convertResourceTypes( } } if (sd.getBucketCols() != null) { - SortedSet bucketCols = new TreeSet<>(sd.getBucketCols()); + List bucketCols = new ArrayList<>(sd.getBucketCols()); for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); } if (sd.getSortCols() != null) { - SortedSet orders = new TreeSet<>(sd.getSortCols()); + List orders = new ArrayList<>(sd.getSortCols()); for (Order order : orders) { md.update(order.getCol().getBytes(ENCODING)); md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); @@ -685,7 +697,7 @@ private static ResourceType convertResourceTypes( } } } - + md.update(sd.isStoredAsSubDirectories() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); return md.digest(); } @@ -758,43 +770,64 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) * @param sdHash hash that is being used as a key for the enclosed storage descriptor * @return First element is the key, second is the serialized partition */ - static byte[][] serializePartition(Partition part, List partTypes, byte[] sdHash) { + static byte[][] serializePartition(Partition part, List partTypes, byte[] sdHash, + String defaultPartitionName) { byte[][] result = new byte[2][]; - result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), partTypes, part.getValues()); + result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), partTypes, part.getValues(), + defaultPartitionName); HbaseMetastoreProto.Partition.Builder builder = HbaseMetastoreProto.Partition.newBuilder(); builder .setCreateTime(part.getCreateTime()) .setLastAccessTime(part.getLastAccessTime()); - if (part.getSd().getLocation() != null) builder.setLocation(part.getSd().getLocation()); - if (part.getSd().getParameters() != null) { + if (part.getSd() != null && part.getSd().getLocation() != null) { + builder.setLocation(part.getSd().getLocation()); + } + if (part.getSd() != null && part.getSd().getParameters() != null) { builder.setSdParameters(buildParameters(part.getSd().getParameters())); } - builder.setSdHash(ByteString.copyFrom(sdHash)); + if (sdHash != null) { + builder.setSdHash(ByteString.copyFrom(sdHash)); + } + if (part.getPrivileges() != null) { + builder.setPrivileges(buildPrincipalPrivilegeSet(part.getPrivileges())); + } if (part.getParameters() != null) builder.setParameters(buildParameters(part.getParameters())); result[1] = builder.build().toByteArray(); return result; } - static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals) { - return buildPartitionKey(dbName, tableName, partTypes, partVals, false); + static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, + String defaultPartitionName) { + return buildPartitionKey(dbName, tableName, partTypes, partVals, false, defaultPartitionName); } - static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, boolean endPrefix) { + static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, boolean endPrefix, + String 
defaultPartitionName) { Object[] components = new Object[partVals.size()]; + boolean lastIsDefault = false; for (int i=0;i partTypes, Object[] components, boolean endPrefix) { + static byte[] buildSerializedPartitionKey(String dbName, String tableName, List partTypes, + Object[] components, boolean endPrefix, boolean lastIsDefault) { ObjectInspector javaStringOI = PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(PrimitiveCategory.STRING); Object[] data = new Object[components.length+2]; @@ -818,7 +851,7 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) } Output output = new Output(); try { - BinarySortableSerDeWithEndPrefix.serializeStruct(output, data, fois, endPrefix); + BinarySortableSerDeWithEndPrefix.serializeStruct(output, data, fois, endPrefix, lastIsDefault); } catch (SerDeException e) { throw new RuntimeException("Cannot serialize partition " + StringUtils.join(components, ",")); } @@ -835,10 +868,13 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) } static void assembleStorageDescriptor(StorageDescriptor sd, StorageDescriptorParts parts) { - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setLocation(parts.location); - ssd.setParameters(parts.parameters); - ssd.setShared(sd); + SharedStorageDescriptor ssd = null; + if (sd != null) { + ssd = new SharedStorageDescriptor(); + ssd.setLocation(parts.location); + ssd.setParameters(parts.parameters); + ssd.setShared(sd); + } if (parts.containingPartition != null) { parts.containingPartition.setSd(ssd); } else if (parts.containingTable != null) { @@ -939,8 +975,9 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa part.setLastAccessTime((int)proto.getLastAccessTime()); if (proto.hasLocation()) sdParts.location = proto.getLocation(); if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); - sdParts.sdHash = proto.getSdHash().toByteArray(); + if (proto.hasSdHash()) sdParts.sdHash = proto.getSdHash().toByteArray(); if (proto.hasParameters()) part.setParameters(buildParameters(proto.getParameters())); + if (proto.hasPrivileges()) part.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); return sdParts; } @@ -1045,7 +1082,9 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa if (table.getSd().getParameters() != null) { builder.setSdParameters(buildParameters(table.getSd().getParameters())); } - builder.setSdHash(ByteString.copyFrom(sdHash)); + if (sdHash != null) { + builder.setSdHash(ByteString.copyFrom(sdHash)); + } if (table.getPartitionKeys() != null) { builder.addAllPartitionKeys(convertFieldSchemaListToProto(table.getPartitionKeys())); } @@ -1058,7 +1097,21 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa if (table.getViewExpandedText() != null) { builder.setViewExpandedText(table.getViewExpandedText()); } - if (table.getTableType() != null) builder.setTableType(table.getTableType()); + // If the table has property EXTERNAL set, update table type + // accordingly + String tableType = table.getTableType(); + boolean isExternal = "TRUE".equals(table.getParameters().get("EXTERNAL")); + if (TableType.MANAGED_TABLE.toString().equals(tableType)) { + if (isExternal) { + tableType = TableType.EXTERNAL_TABLE.toString(); + } + } + if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) { + if (!isExternal) { + tableType = TableType.MANAGED_TABLE.toString(); + } + } + if (tableType != null) 
builder.setTableType(tableType); if (table.getPrivileges() != null) { builder.setPrivileges(buildPrincipalPrivilegeSet(table.getPrivileges())); } @@ -1106,12 +1159,23 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName, table.setRetention((int)proto.getRetention()); if (proto.hasLocation()) sdParts.location = proto.getLocation(); if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); - sdParts.sdHash = proto.getSdHash().toByteArray(); + if (proto.hasSdHash()) sdParts.sdHash = proto.getSdHash().toByteArray(); table.setPartitionKeys(convertFieldSchemaListFromProto(proto.getPartitionKeysList())); table.setParameters(buildParameters(proto.getParameters())); if (proto.hasViewOriginalText()) table.setViewOriginalText(proto.getViewOriginalText()); if (proto.hasViewExpandedText()) table.setViewExpandedText(proto.getViewExpandedText()); - table.setTableType(proto.getTableType()); + String tableType = proto.getTableType(); + if (tableType == null) { + // for backwards compatibility with old metastore persistence + if (table.getViewOriginalText() != null) { + tableType = TableType.VIRTUAL_VIEW.toString(); + } else if ("TRUE".equals(table.getParameters().get("EXTERNAL"))) { + tableType = TableType.EXTERNAL_TABLE.toString(); + } else { + tableType = TableType.MANAGED_TABLE.toString(); + } + } + table.setTableType(tableType); if (proto.hasPrivileges()) { table.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); } @@ -1229,6 +1293,151 @@ static StorageDescriptorParts deserializeIndex(String dbName, String origTableNa return proto.toByteArray(); } + static ColumnStatisticsObj mergeColumnStatisticsForOneColumn(ColumnStatisticsObj orig, + ColumnStatisticsObj updated) { + ColumnStatisticsData origColData = orig.getStatsData(); + ColumnStatisticsData updatedColData = updated.getStatsData(); + switch (updatedColData.getSetField()) { + case BOOLEAN_STATS: + BooleanColumnStatsData origBoolData = origColData.getBooleanStats(); + BooleanColumnStatsData updatedBoolData = updatedColData.getBooleanStats(); + if (updatedBoolData.isSetNumNulls()) { + origBoolData.setNumNulls(updatedBoolData.getNumNulls()); + } + if (updatedBoolData.isSetBitVectors()) { + origBoolData.setBitVectors(updatedBoolData.getBitVectors()); + } + if (updatedBoolData.isSetNumTrues()) { + origBoolData.setNumTrues(updatedBoolData.getNumTrues()); + } + if (updatedBoolData.isSetNumFalses()) { + origBoolData.setNumFalses(updatedBoolData.getNumFalses()); + } + break; + + case LONG_STATS: + LongColumnStatsData origLongData = origColData.getLongStats(); + LongColumnStatsData updatedLongData = updatedColData.getLongStats(); + if (updatedLongData.isSetNumNulls()) { + origLongData.setNumNulls(updatedLongData.getNumNulls()); + } + if (updatedLongData.isSetNumDVs()) { + origLongData.setNumDVs(updatedLongData.getNumDVs()); + } + if (updatedLongData.isSetBitVectors()) { + origLongData.setBitVectors(updatedLongData.getBitVectors()); + } + if (updatedLongData.isSetLowValue()) { + origLongData.setLowValue(updatedLongData.getLowValue()); + } + if (updatedLongData.isSetHighValue()) { + origLongData.setHighValue(updatedLongData.getHighValue()); + } + break; + + case DOUBLE_STATS: + DoubleColumnStatsData origDoubleData = origColData.getDoubleStats(); + DoubleColumnStatsData updatedDoubleData = updatedColData.getDoubleStats(); + if (updatedDoubleData.isSetNumNulls()) { + origDoubleData.setNumNulls(updatedDoubleData.getNumNulls()); + } + if (updatedDoubleData.isSetNumDVs()) { + 
origDoubleData.setNumDVs(updatedDoubleData.getNumDVs()); + } + if (updatedDoubleData.isSetBitVectors()) { + origDoubleData.setBitVectors(updatedDoubleData.getBitVectors()); + } + if (updatedDoubleData.isSetLowValue()) { + origDoubleData.setLowValue(updatedDoubleData.getLowValue()); + } + if (updatedDoubleData.isSetHighValue()) { + origDoubleData.setHighValue(updatedDoubleData.getHighValue()); + } + break; + + case STRING_STATS: + StringColumnStatsData origStringData = origColData.getStringStats(); + StringColumnStatsData updatedStringData = updatedColData.getStringStats(); + if (updatedStringData.isSetNumNulls()) { + origStringData.setNumNulls(updatedStringData.getNumNulls()); + } + if (updatedStringData.isSetNumDVs()) { + origStringData.setNumDVs(updatedStringData.getNumDVs()); + } + if (updatedStringData.isSetBitVectors()) { + origStringData.setBitVectors(updatedStringData.getBitVectors()); + } + if (updatedStringData.isSetMaxColLen()) { + origStringData.setMaxColLen(updatedStringData.getMaxColLen()); + } + if (updatedStringData.isSetAvgColLen()) { + origStringData.setAvgColLen(updatedStringData.getAvgColLen()); + } + break; + + case BINARY_STATS: + BinaryColumnStatsData origBinaryData = origColData.getBinaryStats(); + BinaryColumnStatsData updatedBinaryData = updatedColData.getBinaryStats(); + if (updatedBinaryData.isSetNumNulls()) { + origBinaryData.setNumNulls(updatedBinaryData.getNumNulls()); + } + if (updatedBinaryData.isSetBitVectors()) { + origBinaryData.setBitVectors(updatedBinaryData.getBitVectors()); + } + if (updatedBinaryData.isSetMaxColLen()) { + origBinaryData.setMaxColLen(updatedBinaryData.getMaxColLen()); + } + if (updatedBinaryData.isSetAvgColLen()) { + origBinaryData.setAvgColLen(updatedBinaryData.getAvgColLen()); + } + break; + + case DECIMAL_STATS: + DecimalColumnStatsData origDecimalData = origColData.getDecimalStats(); + DecimalColumnStatsData updatedDecimalData = updatedColData.getDecimalStats(); + if (updatedDecimalData.isSetNumNulls()) { + origDecimalData.setNumNulls(updatedDecimalData.getNumNulls()); + } + if (updatedDecimalData.isSetNumDVs()) { + origDecimalData.setNumDVs(updatedDecimalData.getNumDVs()); + } + if (updatedDecimalData.isSetBitVectors()) { + origDecimalData.setBitVectors(updatedDecimalData.getBitVectors()); + } + if (updatedDecimalData.isSetLowValue()) { + origDecimalData.setLowValue(updatedDecimalData.getLowValue()); + } + if (updatedDecimalData.isSetHighValue()) { + origDecimalData.setHighValue(updatedDecimalData.getHighValue()); + } + break; + + case DATE_STATS: + DateColumnStatsData origDateData = origColData.getDateStats(); + DateColumnStatsData updatedDateData = updatedColData.getDateStats(); + if (updatedDateData.isSetNumNulls()) { + origDateData.setNumNulls(updatedDateData.getNumNulls()); + } + if (updatedDateData.isSetNumDVs()) { + origDateData.setNumDVs(updatedDateData.getNumDVs()); + } + if (updatedDateData.isSetBitVectors()) { + origDateData.setBitVectors(updatedDateData.getBitVectors()); + } + if (updatedDateData.isSetLowValue()) { + origDateData.setLowValue(updatedDateData.getLowValue()); + } + if (updatedDateData.isSetHighValue()) { + origDateData.setHighValue(updatedDateData.getHighValue()); + } + break; + + default: + throw new RuntimeException("Woh, bad. 
Unknown stats type!"); + } + return orig; + } + private static HbaseMetastoreProto.ColumnStats protoBufStatsForOneColumn( ColumnStatistics partitionColumnStats, ColumnStatisticsObj colStats) throws IOException { HbaseMetastoreProto.ColumnStats.Builder builder = HbaseMetastoreProto.ColumnStats.newBuilder(); @@ -1244,78 +1453,162 @@ static StorageDescriptorParts deserializeIndex(String dbName, String origTableNa switch (colData.getSetField()) { case BOOLEAN_STATS: BooleanColumnStatsData boolData = colData.getBooleanStats(); - builder.setNumNulls(boolData.getNumNulls()); - builder.setBoolStats(HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() - .setNumTrues(boolData.getNumTrues()).setNumFalses(boolData.getNumFalses()).build()); + if (boolData.isSetNumNulls()) { + builder.setNumNulls(boolData.getNumNulls()); + } + if (boolData.isSetBitVectors()) { + builder.setBitVectors(boolData.getBitVectors()); + } + HbaseMetastoreProto.ColumnStats.BooleanStats.Builder boolBuilder = + HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder(); + if (boolData.isSetNumTrues()) { + boolBuilder.setNumTrues(boolData.getNumTrues()); + } + if (boolData.isSetNumFalses()) { + boolBuilder.setNumFalses(boolData.getNumFalses()); + } + builder.setBoolStats(boolBuilder.build()); break; case LONG_STATS: LongColumnStatsData longData = colData.getLongStats(); - builder.setNumNulls(longData.getNumNulls()); - builder.setNumDistinctValues(longData.getNumDVs()); + if (longData.isSetNumNulls()) { + builder.setNumNulls(longData.getNumNulls()); + } + if (longData.isSetNumDVs()) { + builder.setNumDistinctValues(longData.getNumDVs()); + } if (longData.isSetBitVectors()) { builder.setBitVectors(longData.getBitVectors()); } - builder.setLongStats(HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() - .setLowValue(longData.getLowValue()).setHighValue(longData.getHighValue()).build()); + HbaseMetastoreProto.ColumnStats.LongStats.Builder longBuilder = + HbaseMetastoreProto.ColumnStats.LongStats.newBuilder(); + if (longData.isSetLowValue()) { + longBuilder.setLowValue(longData.getLowValue()); + } + if (longData.isSetHighValue()) { + longBuilder.setHighValue(longData.getHighValue()); + } + builder.setLongStats(longBuilder.build()); break; case DOUBLE_STATS: DoubleColumnStatsData doubleData = colData.getDoubleStats(); - builder.setNumNulls(doubleData.getNumNulls()); - builder.setNumDistinctValues(doubleData.getNumDVs()); + if (doubleData.isSetNumNulls()) { + builder.setNumNulls(doubleData.getNumNulls()); + } + if (doubleData.isSetNumDVs()) { + builder.setNumDistinctValues(doubleData.getNumDVs()); + } if (doubleData.isSetBitVectors()) { builder.setBitVectors(doubleData.getBitVectors()); } - builder.setDoubleStats(HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() - .setLowValue(doubleData.getLowValue()).setHighValue(doubleData.getHighValue()).build()); + HbaseMetastoreProto.ColumnStats.DoubleStats.Builder doubleBuilder = + HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder(); + if (doubleData.isSetLowValue()) { + doubleBuilder.setLowValue(doubleData.getLowValue()); + } + if (doubleData.isSetHighValue()) { + doubleBuilder.setHighValue(doubleData.getHighValue()); + } + builder.setDoubleStats(doubleBuilder.build()); break; case STRING_STATS: StringColumnStatsData stringData = colData.getStringStats(); - builder.setNumNulls(stringData.getNumNulls()); - builder.setNumDistinctValues(stringData.getNumDVs()); + if (stringData.isSetNumNulls()) { + builder.setNumNulls(stringData.getNumNulls()); + } + if (stringData.isSetNumDVs()) { 
+ builder.setNumDistinctValues(stringData.getNumDVs()); + } if (stringData.isSetBitVectors()) { builder.setBitVectors(stringData.getBitVectors()); } - builder.setStringStats(HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() - .setMaxColLength(stringData.getMaxColLen()).setAvgColLength(stringData.getAvgColLen()) - .build()); + HbaseMetastoreProto.ColumnStats.StringStats.Builder stringBuilder = + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(); + if (stringData.isSetMaxColLen()) { + stringBuilder.setMaxColLength(stringData.getMaxColLen()); + } + if (stringData.isSetAvgColLen()) { + stringBuilder.setAvgColLength(stringData.getAvgColLen()); + } + builder.setStringStats(stringBuilder.build()); break; case BINARY_STATS: BinaryColumnStatsData binaryData = colData.getBinaryStats(); - builder.setNumNulls(binaryData.getNumNulls()); - builder.setBinaryStats(HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() - .setMaxColLength(binaryData.getMaxColLen()).setAvgColLength(binaryData.getAvgColLen()) - .build()); + if (binaryData.isSetNumNulls()) { + builder.setNumNulls(binaryData.getNumNulls()); + } + HbaseMetastoreProto.ColumnStats.StringStats.Builder binaryBuilder = + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(); + if (binaryData.isSetMaxColLen()) { + binaryBuilder.setMaxColLength(binaryData.getMaxColLen()); + } + if (binaryData.isSetAvgColLen()) { + binaryBuilder.setAvgColLength(binaryData.getAvgColLen()); + } + builder.setBinaryStats(binaryBuilder.build()); break; case DECIMAL_STATS: DecimalColumnStatsData decimalData = colData.getDecimalStats(); - builder.setNumNulls(decimalData.getNumNulls()); - builder.setNumDistinctValues(decimalData.getNumDVs()); + if (decimalData.isSetNumNulls()) { + builder.setNumNulls(decimalData.getNumNulls()); + } + if (decimalData.isSetNumDVs()) { + builder.setNumDistinctValues(decimalData.getNumDVs()); + } if (decimalData.isSetBitVectors()) { builder.setBitVectors(decimalData.getBitVectors()); } if (decimalData.getLowValue() != null && decimalData.getHighValue() != null) { - builder.setDecimalStats( - HbaseMetastoreProto.ColumnStats.DecimalStats - .newBuilder() - .setLowValue( - HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() - .setUnscaled(ByteString.copyFrom(decimalData.getLowValue().getUnscaled())) - .setScale(decimalData.getLowValue().getScale()).build()) - .setHighValue( - HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() - .setUnscaled(ByteString.copyFrom(decimalData.getHighValue().getUnscaled())) - .setScale(decimalData.getHighValue().getScale()).build())).build(); + HbaseMetastoreProto.ColumnStats.DecimalStats.Builder decimalBuilder = + HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder(); + if (decimalData.isSetLowValue()) { + decimalBuilder.setLowValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getLowValue().getUnscaled())) + .setScale(decimalData.getLowValue().getScale()).build()); + } + if (decimalData.isSetHighValue()) { + decimalBuilder.setHighValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getHighValue().getUnscaled())) + .setScale(decimalData.getHighValue().getScale()).build()); + } + builder.setDecimalStats(decimalBuilder.build()); } else { builder.setDecimalStats(HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder().clear() .build()); } break; + case DATE_STATS: + DateColumnStatsData dateData = colData.getDateStats(); + if 
(dateData.isSetNumNulls()) { + builder.setNumNulls(dateData.getNumNulls()); + } + if (dateData.isSetNumDVs()) { + builder.setNumDistinctValues(dateData.getNumDVs()); + } + if (dateData.isSetBitVectors()) { + builder.setBitVectors(dateData.getBitVectors()); + } + HbaseMetastoreProto.ColumnStats.DateStats.Builder dateBuilder = + HbaseMetastoreProto.ColumnStats.DateStats.newBuilder(); + if (dateData.isSetLowValue()) { + dateBuilder.setLowValue(HbaseMetastoreProto.ColumnStats.Date.newBuilder() + .setDaysSinceEpoch(dateData.getLowValue().getDaysSinceEpoch())); + } + if (dateData.isSetHighValue()) { + dateBuilder.setHighValue(HbaseMetastoreProto.ColumnStats.Date.newBuilder() + .setDaysSinceEpoch(dateData.getHighValue().getDaysSinceEpoch())); + } + builder.setDateStats(dateBuilder.build()); + break; + default: throw new RuntimeException("Woh, bad. Unknown stats type!"); } @@ -1408,6 +1701,22 @@ static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics partiti decimalData.setNumDVs(proto.getNumDistinctValues()); decimalData.setBitVectors(proto.getBitVectors()); colData.setDecimalStats(decimalData); + } else if (proto.hasDateStats()) { + DateColumnStatsData dateData = new DateColumnStatsData(); + if (proto.getDateStats().hasLowValue()) { + Date loVal = new Date(); + loVal.setDaysSinceEpoch(proto.getDateStats().getLowValue().getDaysSinceEpoch()); + dateData.setLowValue(loVal); + } + if (proto.getDateStats().hasHighValue()) { + Date hiVal = new Date(); + hiVal.setDaysSinceEpoch(proto.getDateStats().getHighValue().getDaysSinceEpoch()); + dateData.setHighValue(hiVal); + } + dateData.setNumNulls(proto.getNumNulls()); + dateData.setNumDVs(proto.getNumDistinctValues()); + dateData.setBitVectors(proto.getBitVectors()); + colData.setDateStats(dateData); } else { throw new RuntimeException("Woh, bad. Unknown stats type!"); } @@ -1525,4 +1834,26 @@ static String deserializeMasterKey(byte[] value) throws InvalidProtocolBufferExc public static double getDoubleValue(Decimal decimal) { return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); } + + static List
<Table> getSharedTablesNoPriv(List<Table> tables) { + List<Table> sharedTables = new ArrayList<Table>
(); + for (Table t : tables) { + SharedTable sTable = new SharedTable(); + sTable.setShared(t); + sTable.unsetPrivileges(); + sharedTables.add(sTable); + } + return sharedTables; + } + + static List getSharedPartitionsNoPriv(List parts) { + List sharedParts = new ArrayList(); + for (Partition part : parts) { + SharedPartition sPart = new SharedPartition(); + sPart.setShared(part); + sPart.unsetPrivileges(); + sharedParts.add(sPart); + } + return sharedParts; + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java index 2b0863d..323005b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java @@ -264,7 +264,8 @@ public int compareTo(byte[] value, int offset, int length) { NativeOperator nativeOp = nativeOps.get(i); switch (op.type) { case LIKE: - if (!deserializedkeys.get(nativeOp.pos).toString().matches(op.val)) { + if (!(deserializedkeys.get(nativeOp.pos)!=null?deserializedkeys.get(nativeOp.pos):"") + .toString().matches(op.val)) { if (LOG.isDebugEnabled()) { LOG.debug("Fail to match operator " + op.keyName + "(" + deserializedkeys.get(nativeOp.pos) + ") LIKE " + nativeOp.val); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedPartition.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedPartition.java new file mode 100644 index 0000000..4294181 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedPartition.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; + +@SuppressWarnings("serial") +public class SharedPartition extends Partition { + + private boolean parametersCopied = false; + private boolean privilegesCopied = false; + + void setShared(Partition shared) { + if (shared.isSetValues()) { + super.setValues(shared.getValues()); + } + if (shared.isSetDbName()) { + super.setDbName(shared.getDbName()); + } + if (shared.isSetTableName()) { + super.setTableName(shared.getTableName()); + } + if (shared.isSetCreateTime()) { + super.setCreateTime(shared.getCreateTime()); + } + if (shared.isSetLastAccessTime()) { + super.setLastAccessTime(shared.getLastAccessTime()); + } + if (shared.isSetSd()) { + super.setSd(shared.getSd()); + } + if (shared.isSetParameters()) { + super.setParameters(shared.getParameters()); + } + if (shared.isSetPrivileges()) { + super.setPrivileges(shared.getPrivileges()); + } + } + + public void setReadOnly() { + parametersCopied = privilegesCopied = true; + } + + @Override + public StorageDescriptor getSd() { + return super.getSd(); + } + + @Override + public void setSd(StorageDescriptor sd) { + super.setSd(sd); + } + + @Override + public void unsetSd() { + super.unsetSd(); + } + + @Override + public Map getParameters() { + copyParameters(); + return super.getParameters(); + } + + @Override + public void setParameters(Map parameters) { + parametersCopied = true; + super.setParameters(parameters); + } + + @Override + public void unsetParameters() { + parametersCopied = true; + super.unsetParameters(); + } + + private void copyParameters() { + if (!parametersCopied) { + parametersCopied = true; + if (super.getParameters() != null) { + Map parametersCopy = new HashMap(); + for (Map.Entry entry : super.getParameters().entrySet()) { + parametersCopy.put(entry.getKey(), entry.getValue()); + } + super.setParameters(parametersCopy); + } + } + } + + @Override + public PrincipalPrivilegeSet getPrivileges() { + copyPrivileges(); + return super.getPrivileges(); + } + + @Override + public void setPrivileges(PrincipalPrivilegeSet privileges) { + privilegesCopied = true; + super.setPrivileges(privileges); + } + + @Override + public void unsetPrivileges() { + privilegesCopied = true; + super.unsetPrivileges(); + } + + private void copyPrivileges() { + if (!privilegesCopied) { + privilegesCopied = true; + if (super.getPrivileges() != null) { + super.setPrivileges(new PrincipalPrivilegeSet(super.getPrivileges())); + } + } + } +} \ No newline at end of file diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedTable.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedTable.java new file mode 100644 index 0000000..ce702a9 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/SharedTable.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; + +@SuppressWarnings("serial") +public class SharedTable extends Table { + + private boolean partitionKeysCopied = false; + private boolean parametersCopied = false; + private boolean privilegesCopied = false; + + SharedTable() { + } + + void setShared(Table shared) { + if (shared.isSetTableName()) { + super.setTableName(shared.getTableName()); + } + if (shared.isSetDbName()) { + super.setDbName(shared.getDbName()); + } + if (shared.isSetOwner()) { + super.setOwner(shared.getOwner()); + } + if (shared.isSetCreateTime()) { + super.setCreateTime(shared.getCreateTime()); + } + if (shared.isSetLastAccessTime()) { + super.setLastAccessTime(shared.getLastAccessTime()); + } + if (shared.isSetRetention()) { + super.setRetention(shared.getRetention()); + } + if (shared.isSetSd()) { + super.setSd(shared.getSd()); + } + if (shared.isSetPartitionKeys()) { + super.setPartitionKeys(shared.getPartitionKeys()); + } + if (shared.isSetParameters()) { + super.setParameters(shared.getParameters()); + } + if (shared.isSetViewOriginalText()) { + super.setViewOriginalText(shared.getViewOriginalText()); + } + if (shared.isSetViewExpandedText()) { + super.setViewExpandedText(shared.getViewExpandedText()); + } + if (shared.isSetTableType()) { + super.setTableType(shared.getTableType()); + } + if (shared.isSetPrivileges()) { + super.setPrivileges(shared.getPrivileges()); + } + if (shared.isSetTemporary()) { + super.setTemporary(shared.isTemporary()); + } + } + + public void setReadOnly() { + partitionKeysCopied = parametersCopied = privilegesCopied = true; + } + + @Override + public StorageDescriptor getSd() { + return super.getSd(); + } + + @Override + public void setSd(StorageDescriptor sd) { + super.setSd(sd); + } + + @Override + public void unsetSd() { + super.unsetSd(); + } + + @Override + public List getPartitionKeys() { + copyPartitionKeys(); + return super.getPartitionKeys(); + } + + @Override + public void setPartitionKeys(List partitionKeys) { + partitionKeysCopied = true; + super.setPartitionKeys(partitionKeys); + } + + @Override + public void unsetPartitionKeys() { + partitionKeysCopied = true; + super.unsetPartitionKeys(); + } + + private void copyPartitionKeys() { + if (!partitionKeysCopied) { + partitionKeysCopied = true; + if (super.getPartitionKeys() != null) { + List partitionKeysCopy = new ArrayList(); + for (FieldSchema fs : super.getPartitionKeys()) { + partitionKeysCopy.add(new FieldSchema(fs)); + } + super.setPartitionKeys(partitionKeysCopy); + } + } + } + + @Override + public Map getParameters() { + copyParameters(); + return super.getParameters(); + } + + @Override + public void setParameters(Map parameters) { + parametersCopied = true; + super.setParameters(parameters); + } + + 
@Override + public void unsetParameters() { + parametersCopied = true; + super.unsetParameters(); + } + + private void copyParameters() { + if (!parametersCopied) { + parametersCopied = true; + if (super.getParameters() != null) { + Map parametersCopy = new HashMap(); + for (Map.Entry entry : super.getParameters().entrySet()) { + parametersCopy.put(entry.getKey(), entry.getValue()); + } + super.setParameters(parametersCopy); + } + } + } + + @Override + public PrincipalPrivilegeSet getPrivileges() { + copyPrivileges(); + return super.getPrivileges(); + } + + @Override + public void setPrivileges(PrincipalPrivilegeSet privileges) { + privilegesCopied = true; + super.setPrivileges(privileges); + } + + @Override + public void unsetPrivileges() { + privilegesCopied = true; + super.unsetPrivileges(); + } + + private void copyPrivileges() { + if (!privilegesCopied) { + privilegesCopied = true; + if (super.getPrivileges() != null) { + super.setPrivileges(new PrincipalPrivilegeSet(super.getPrivileges())); + } + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java index 18f8afc..daefbb9 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java @@ -91,12 +91,12 @@ public AggrStats load(StatsCacheKey key) throws Exception { if (aggrStats == null) { misses.incr(); ColumnStatsAggregator aggregator = null; - aggrStats = new AggrStats(); LOG.debug("Unable to find aggregated stats for " + key.colName + ", aggregating"); List css = hrw.getPartitionStatistics(key.dbName, key.tableName, key.partNames, HBaseStore.partNameListToValsList(key.partNames), Collections.singletonList(key.colName)); if (css != null && css.size() > 0) { + aggrStats = new AggrStats(); aggrStats.setPartsFound(css.size()); if (aggregator == null) { aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(css.iterator() diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto index 6fbe36c..c49a7cf 100644 --- a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto +++ b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto @@ -19,7 +19,8 @@ package org.apache.hadoop.hive.metastore.hbase; enum PrincipalType { USER = 0; - ROLE = 1; + GROUP = 1; + ROLE = 2; } message AggrStats { @@ -68,6 +69,15 @@ message ColumnStats { optional double high_value = 2; } + message Date { + required int64 daysSinceEpoch = 1; + } + + message DateStats { + optional Date low_value = 1; + optional Date high_value = 2; + } + message StringStats { optional int64 max_col_length = 1; optional double avg_col_length = 2; @@ -92,8 +102,9 @@ message ColumnStats { optional StringStats string_stats = 8; optional StringStats binary_stats = 9; optional DecimalStats decimal_stats = 10; - optional string column_name = 11; - optional string bit_vectors = 12; + optional DateStats date_stats = 11; + optional string column_name = 12; + optional string bit_vectors = 13; } message Database { @@ -156,9 +167,9 @@ message Partition { optional int64 last_access_time = 2; optional string location = 3; optional Parameters sd_parameters = 4; // storage descriptor parameters - required bytes sd_hash = 5; + optional bytes sd_hash = 5; optional Parameters 
parameters = 6; // partition parameters - // We don't support partition level privileges + optional PrincipalPrivilegeSet privileges = 7; } message PrincipalPrivilegeSetEntry { @@ -168,7 +179,8 @@ message PrincipalPrivilegeSetEntry { message PrincipalPrivilegeSet { repeated PrincipalPrivilegeSetEntry users = 1; - repeated PrincipalPrivilegeSetEntry roles = 2; + repeated PrincipalPrivilegeSetEntry groups = 2; + repeated PrincipalPrivilegeSetEntry roles = 3; } message PrivilegeGrantInfo { @@ -247,7 +259,7 @@ message Table { optional int64 retention = 4; optional string location = 5; optional Parameters sd_parameters = 6; // storage descriptor parameters - required bytes sd_hash = 7; + optional bytes sd_hash = 7; repeated FieldSchema partition_keys = 8; optional Parameters parameters = 9; optional string view_original_text = 10; diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java index 06884b3..25415a2 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.PartFilterExprUtil; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -44,6 +45,7 @@ public class TestHBaseFilterPlanUtil { final boolean INCLUSIVE = true; + final String defaultPartitionName = HiveConf.ConfVars.DEFAULTPARTITIONNAME.defaultStrVal; /** * Test the function that compares byte arrays @@ -89,10 +91,10 @@ public void testgetComparedMarker() { l = new ScanMarker("1", !INCLUSIVE, "int"); // the rule for null vs non-null is different // non-null is both smaller and greater than null - Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, true)); - Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, true)); - Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, false)); - Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, false)); + Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, true, defaultPartitionName)); + Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, true, defaultPartitionName)); + Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, false, defaultPartitionName)); + Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, false, defaultPartitionName)); // create l that is greater because of the bytes l = new ScanMarker("2", INCLUSIVE, "int"); @@ -102,10 +104,10 @@ public void testgetComparedMarker() { } private void assertFirstGreater(ScanMarker big, ScanMarker small) { - Assert.assertEquals(big, ScanPlan.getComparedMarker(big, small, true)); - Assert.assertEquals(big, ScanPlan.getComparedMarker(small, big, true)); - Assert.assertEquals(small, ScanPlan.getComparedMarker(big, small, false)); - Assert.assertEquals(small, ScanPlan.getComparedMarker(small, big, false)); + Assert.assertEquals(big, ScanPlan.getComparedMarker(big, small, true, defaultPartitionName)); + Assert.assertEquals(big, ScanPlan.getComparedMarker(small, big, true, defaultPartitionName)); + Assert.assertEquals(small, ScanPlan.getComparedMarker(big, small, false, defaultPartitionName)); + Assert.assertEquals(small, ScanPlan.getComparedMarker(small, big, false, 
defaultPartitionName)); } /** @@ -113,8 +115,8 @@ private void assertFirstGreater(ScanMarker big, ScanMarker small) { */ @Test public void testScanPlanAnd() { - ScanPlan l = new ScanPlan(); - ScanPlan r = new ScanPlan(); + ScanPlan l = new ScanPlan(defaultPartitionName); + ScanPlan r = new ScanPlan(defaultPartitionName); l.setStartMarker("a", "int", "10", INCLUSIVE); r.setStartMarker("a", "int", "10", INCLUSIVE); @@ -147,8 +149,8 @@ public void testScanPlanAnd() { */ @Test public void testScanPlanOr() { - ScanPlan l = new ScanPlan(); - ScanPlan r = new ScanPlan(); + ScanPlan l = new ScanPlan(defaultPartitionName); + ScanPlan r = new ScanPlan(defaultPartitionName); l.setStartMarker("a", "int", "1", INCLUSIVE); r.setStartMarker("a", "int", "11", INCLUSIVE); @@ -167,17 +169,17 @@ public void testScanPlanOr() { @Test public void testMultiScanPlanOr() { - MultiScanPlan l = createMultiScanPlan(new ScanPlan()); - MultiScanPlan r = createMultiScanPlan(new ScanPlan()); + MultiScanPlan l = createMultiScanPlan(new ScanPlan(defaultPartitionName)); + MultiScanPlan r = createMultiScanPlan(new ScanPlan(defaultPartitionName)); // verify OR of two multi plans with one plan each Assert.assertEquals(2, l.or(r).getPlans().size()); // verify OR of multi plan with a single scanplan - Assert.assertEquals(2, l.or(new ScanPlan()).getPlans().size()); - Assert.assertEquals(2, (new ScanPlan()).or(l).getPlans().size()); + Assert.assertEquals(2, l.or(new ScanPlan(defaultPartitionName)).getPlans().size()); + Assert.assertEquals(2, (new ScanPlan(defaultPartitionName)).or(l).getPlans().size()); // verify or of two multiplans with more than one scan plan - r = createMultiScanPlan(new ScanPlan(), new ScanPlan()); + r = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName)); Assert.assertEquals(3, l.or(r).getPlans().size()); Assert.assertEquals(3, r.or(l).getPlans().size()); @@ -192,21 +194,22 @@ private MultiScanPlan createMultiScanPlan(ScanPlan... scanPlans) { */ @Test public void testMultiScanPlanAnd() { - MultiScanPlan l = createMultiScanPlan(new ScanPlan()); - MultiScanPlan r = createMultiScanPlan(new ScanPlan()); + MultiScanPlan l = createMultiScanPlan(new ScanPlan(defaultPartitionName)); + MultiScanPlan r = createMultiScanPlan(new ScanPlan(defaultPartitionName)); // two MultiScanPlan with single scan plans should result in new FilterPlan // with just one scan Assert.assertEquals(1, l.and(r).getPlans().size()); // l has one ScanPlan, r has two. AND result should have two - r = createMultiScanPlan(new ScanPlan(), new ScanPlan()); + r = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName)); Assert.assertEquals(2, l.and(r).getPlans().size()); Assert.assertEquals(2, r.and(l).getPlans().size()); // l has 2 ScanPlans, r has 3. 
AND result should have 6 - l = createMultiScanPlan(new ScanPlan(), new ScanPlan()); - r = createMultiScanPlan(new ScanPlan(), new ScanPlan(), new ScanPlan()); + l = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName)); + r = createMultiScanPlan(new ScanPlan(defaultPartitionName), new ScanPlan(defaultPartitionName), + new ScanPlan(defaultPartitionName)); Assert.assertEquals(6, l.and(r).getPlans().size()); Assert.assertEquals(6, r.and(l).getPlans().size()); } @@ -268,7 +271,7 @@ private void verifyPlan(TreeNode l, List parts, String keyName, Sca e = new ExpressionTree(); e.setRootForTest(l); } - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName); FilterPlan plan = planRes.plan; Assert.assertEquals("Has unsupported condition", hasUnsupportedCondition, planRes.hasUnsupportedCondition); @@ -328,7 +331,7 @@ public void testTreeNodePlan() throws MetaException { tn = new TreeNode(l, LogicalOperator.OR, r); ExpressionTree e = new ExpressionTree(); e.setRootForTest(tn); - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName); Assert.assertEquals(2, planRes.plan.getPlans().size()); Assert.assertEquals(false, planRes.hasUnsupportedCondition); @@ -336,7 +339,7 @@ public void testTreeNodePlan() throws MetaException { TreeNode tn2 = new TreeNode(l, LogicalOperator.AND, tn); e = new ExpressionTree(); e.setRootForTest(tn2); - planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName); Assert.assertEquals(2, planRes.plan.getPlans().size()); Assert.assertEquals(false, planRes.hasUnsupportedCondition); @@ -349,7 +352,7 @@ public void testTreeNodePlan() throws MetaException { TreeNode tn3 = new TreeNode(tn2, LogicalOperator.OR, klike); e = new ExpressionTree(); e.setRootForTest(tn3); - planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts, defaultPartitionName); Assert.assertEquals(3, planRes.plan.getPlans().size()); Assert.assertEquals(false, planRes.hasUnsupportedCondition); @@ -365,13 +368,13 @@ public void testPartitionKeyScannerAllString() throws Exception { // One prefix key and one minor key range ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree; - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName); Assert.assertEquals(planRes.plan.getPlans().size(), 1); ScanPlan sp = planRes.plan.getPlans().get(0); - byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName); + byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName); RowFilter filter = (RowFilter)sp.getFilter(parts); // scan range contains the major key year, rowfilter contains minor key state @@ -387,13 +390,13 @@ public void testPartitionKeyScannerAllString() throws Exception { // Two prefix key and one LIKE operator exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and month > 10 " + "and month <= 11 and state like 'C%'").tree; - planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + 
planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName); Assert.assertEquals(planRes.plan.getPlans().size(), 1); sp = planRes.plan.getPlans().get(0); - startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName); filter = (RowFilter)sp.getFilter(parts); // scan range contains the major key value year/month, rowfilter contains LIKE operator @@ -409,13 +412,13 @@ public void testPartitionKeyScannerAllString() throws Exception { // One prefix key, one minor key range and one LIKE operator exprTree = PartFilterExprUtil.getFilterParser("year >= 2014 and month > 10 " + "and month <= 11 and state like 'C%'").tree; - planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName); Assert.assertEquals(planRes.plan.getPlans().size(), 1); sp = planRes.plan.getPlans().get(0); - startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName); filter = (RowFilter)sp.getFilter(parts); // scan range contains the major key value year (low bound), rowfilter contains minor key state @@ -431,11 +434,11 @@ public void testPartitionKeyScannerAllString() throws Exception { // Condition contains or exprTree = PartFilterExprUtil.getFilterParser("year = 2014 and (month > 10 " + "or month < 3)").tree; - planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName); sp = planRes.plan.getPlans().get(0); - startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName); filter = (RowFilter)sp.getFilter(parts); // The first ScanPlan contains year = 2014 and month > 10 @@ -444,8 +447,8 @@ public void testPartitionKeyScannerAllString() throws Exception { Assert.assertTrue(Bytes.contains(startRowSuffix, "10".getBytes())); sp = planRes.plan.getPlans().get(1); - startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName); filter = (RowFilter)sp.getFilter(parts); // The first ScanPlan contains year = 2014 and month < 3 @@ -463,13 +466,13 @@ public void testPartitionKeyScannerMixedType() throws Exception { // One prefix key and one minor key range ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree; - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts, defaultPartitionName); Assert.assertEquals(planRes.plan.getPlans().size(), 1); ScanPlan sp = planRes.plan.getPlans().get(0); - byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", 
parts); - byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts, defaultPartitionName); + byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts, defaultPartitionName); RowFilter filter = (RowFilter)sp.getFilter(parts); // scan range contains the major key year, rowfilter contains minor key state diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedPartition.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedPartition.java new file mode 100644 index 0000000..fe533ff --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedPartition.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.Assert; +import org.junit.Test; + +public class TestSharedPartition { + + @Test + public void changeOnParameters() { + Partition part = new Partition(); + Map parameters = new HashMap(); + parameters.put("key1", "value1"); + part.setParameters(parameters); + SharedPartition sPart = new SharedPartition(); + sPart.setShared(part); + sPart.getParameters().put("key2", "value2"); + Assert.assertFalse(sPart.getParameters() == part.getParameters()); + Assert.assertEquals(2, sPart.getParameters().size()); + Assert.assertEquals(1, part.getParameters().size()); + } + + @Test + public void changeOnPrivileges() { + Partition part = new Partition(); + PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet(); + List userPrivileges = new ArrayList(); + PrivilegeGrantInfo userPrivilege = new PrivilegeGrantInfo(); + userPrivilege.setPrivilege("SELECT"); + userPrivileges.add(userPrivilege); + Map> userPrivilegesMap + = new HashMap>(); + userPrivilegesMap.put("test", userPrivileges); + privileges.setUserPrivileges(userPrivilegesMap); + part.setPrivileges(privileges); + SharedPartition sPart = new SharedPartition(); + sPart.setShared(part); + sPart.getPrivileges().setUserPrivileges(null); + Assert.assertNull(sPart.getPrivileges().getUserPrivileges()); + Assert.assertNotNull(part.getPrivileges().getUserPrivileges()); + } + + @Test + public void changeOnSd() { + Partition part = new Partition(); + StorageDescriptor sd = new StorageDescriptor(); + 
SerDeInfo serde = new SerDeInfo(); + serde.setName("serde"); + sd.setSerdeInfo(serde); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + part.setSd(ssd); + SharedPartition sPart = new SharedPartition(); + sPart.setShared(part); + // StorageDescriptor will not be copied since it is protected by + // SharedStorageDescriptor + Assert.assertTrue(sPart.getSd() == part.getSd()); + sPart.getSd().getSerdeInfo().setName("different"); + Assert.assertEquals("different", sPart.getSd().getSerdeInfo().getName()); + } + + @Test + public void setReadOnly() { + Partition part = new Partition(); + PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet(); + List userPrivileges = new ArrayList(); + PrivilegeGrantInfo userPrivilege = new PrivilegeGrantInfo(); + userPrivilege.setPrivilege("SELECT"); + userPrivileges.add(userPrivilege); + Map> userPrivilegesMap + = new HashMap>(); + userPrivilegesMap.put("test", userPrivileges); + privileges.setUserPrivileges(userPrivilegesMap); + part.setPrivileges(privileges); + SharedPartition sPart = new SharedPartition(); + sPart.setShared(part); + sPart.setReadOnly(); + Assert.assertTrue(sPart.getPrivileges() == part.getPrivileges()); + } + +} diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedTable.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedTable.java new file mode 100644 index 0000000..f10e048 --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedTable.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.Assert; +import org.junit.Test; + +public class TestSharedTable { + + @Test + public void changeOnPartitionKeys() { + Table table = new Table(); + List partKeys = new ArrayList(); + partKeys.add(new FieldSchema("a", "string", "")); + table.setPartitionKeys(partKeys); + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + sTable.getPartitionKeys().get(0).setName("b"); + Assert.assertFalse(sTable.getPartitionKeys() == table.getPartitionKeys().get(0)); + Assert.assertEquals("b", sTable.getPartitionKeys().get(0).getName()); + Assert.assertEquals("a", table.getPartitionKeys().get(0).getName()); + } + + @Test + public void unsetPartitionKeys() { + Table table = new Table(); + List partKeys = new ArrayList(); + partKeys.add(new FieldSchema("a", "string", "")); + table.setPartitionKeys(partKeys); + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + sTable.unsetPartitionKeys(); + Assert.assertFalse(sTable.getPartitionKeys() == table.getPartitionKeys().get(0)); + Assert.assertEquals(0, sTable.getPartitionKeysSize()); + Assert.assertEquals(1, table.getPartitionKeysSize()); + } + + @Test + public void changeOnParameters() { + Table table = new Table(); + Map parameters = new HashMap(); + parameters.put("key1", "value1"); + table.setParameters(parameters); + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + sTable.getParameters().put("key2", "value2"); + Assert.assertFalse(sTable.getParameters() == table.getParameters()); + Assert.assertEquals(2, sTable.getParameters().size()); + Assert.assertEquals(1, table.getParameters().size()); + } + + @Test + public void changeOnPrivileges() { + Table table = new Table(); + PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet(); + List userPrivileges = new ArrayList(); + PrivilegeGrantInfo userPrivilege = new PrivilegeGrantInfo(); + userPrivilege.setPrivilege("SELECT"); + userPrivileges.add(userPrivilege); + Map> userPrivilegesMap + = new HashMap>(); + userPrivilegesMap.put("test", userPrivileges); + privileges.setUserPrivileges(userPrivilegesMap); + table.setPrivileges(privileges); + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + sTable.getPrivileges().setUserPrivileges(null); + Assert.assertNull(sTable.getPrivileges().getUserPrivileges()); + Assert.assertNotNull(table.getPrivileges().getUserPrivileges()); + } + + @Test + public void changeOnSd() { + Table table = new Table(); + StorageDescriptor sd = new StorageDescriptor(); + SerDeInfo serde = new SerDeInfo(); + serde.setName("serde"); + sd.setSerdeInfo(serde); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + table.setSd(ssd); + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + // StorageDescriptor will not be copied since it is protected by + // SharedStorageDescriptor + Assert.assertTrue(sTable.getSd() == table.getSd()); + sTable.getSd().getSerdeInfo().setName("different"); + Assert.assertEquals("different", 
sTable.getSd().getSerdeInfo().getName()); + } + + @Test + public void setReadOnly() { + Table table = new Table(); + List partKeys = new ArrayList(); + partKeys.add(new FieldSchema("a", "string", "")); + table.setPartitionKeys(partKeys); + SharedTable sTable = new SharedTable(); + sTable.setShared(table); + sTable.setReadOnly(); + Assert.assertTrue(sTable.getPartitionKeys() == table.getPartitionKeys()); + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java index 178a2de..a0a31a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java @@ -228,7 +228,8 @@ private void log(String error) { List colNames = null; if (t != null) { destTableName = t.getDbName() + "." + t.getTableName(); - fieldSchemas = t.getCols(); + fieldSchemas = new ArrayList(); + fieldSchemas.addAll(t.getCols()); } else { // Based on the plan outputs, find out the target table name and column names. for (WriteEntity output : plan.getOutputs()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index c0edde9..bd5f295 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; @@ -547,12 +548,14 @@ public boolean isStoredAsSubDirectories() { } public List> getSkewedColValues(){ - return tPartition.getSd().getSkewedInfo().getSkewedColValues(); + return tPartition.getSd().getSkewedInfo()!=null? + tPartition.getSd().getSkewedInfo().getSkewedColValues() : new ArrayList>(); } public List getSkewedColNames() { LOG.debug("sd is " + tPartition.getSd().getClass().getName()); - return tPartition.getSd().getSkewedInfo().getSkewedColNames(); + return tPartition.getSd().getSkewedInfo()!=null? + tPartition.getSd().getSkewedInfo().getSkewedColNames() : new ArrayList(); } public void setSkewedValueLocationMap(List valList, String dirName) @@ -561,6 +564,9 @@ public void setSkewedValueLocationMap(List valList, String dirName) .getSkewedColValueLocationMaps(); if (null == mappings) { mappings = new HashMap, String>(); + if (tPartition.getSd().getSkewedInfo()==null) { + tPartition.getSd().setSkewedInfo(new SkewedInfo()); + } tPartition.getSd().getSkewedInfo().setSkewedColValueLocationMaps(mappings); } @@ -569,7 +575,9 @@ public void setSkewedValueLocationMap(List valList, String dirName) } public Map, String> getSkewedColValueLocationMaps() { - return tPartition.getSd().getSkewedInfo().getSkewedColValueLocationMaps(); + return tPartition.getSd().getSkewedInfo()!=null? 
+ tPartition.getSd().getSkewedInfo().getSkewedColValueLocationMaps() + : new HashMap, String>(); } public void checkValidity() throws HiveException { diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java index 73e20a8..6e0d5fe 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java @@ -26,15 +26,21 @@ public class BinarySortableSerDeWithEndPrefix extends BinarySortableSerDe { public static void serializeStruct(Output byteStream, Object[] fieldData, - List fieldOis, boolean endPrefix) throws SerDeException { + List fieldOis, boolean endPrefix, boolean lastIsDefault) + throws SerDeException { for (int i = 0; i < fieldData.length; i++) { serialize(byteStream, fieldData[i], fieldOis.get(i), false, ZERO, ONE); } if (endPrefix) { - if (fieldData[fieldData.length-1]!=null) { - byteStream.getData()[byteStream.getLength()-1]++; - } else { + // If the last key is default partition, we shall + // only get null partition key, but not other partitions; + // On the contrary, if last key is null, which means + // an open end search, we shall get all partitions with + // the prefix + if (fieldData[fieldData.length-1]==null && !lastIsDefault) { byteStream.getData()[byteStream.getLength()-1]+=2; + } else { + byteStream.getData()[byteStream.getLength()-1]++; } } }
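
For reference, a minimal caller-side sketch of the new buildPartitionKey signature. It assumes the default partition name is read from HiveConf and that the caller sits in org.apache.hadoop.hive.metastore.hbase (the helper is package-private); the class and method names below are invented for illustration and are not part of the patch.

package org.apache.hadoop.hive.metastore.hbase;

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;

class PartitionKeyUsageSketch {
  // Build the binary-sortable row key for a partition, threading in the configured
  // default partition name so that default-partition values receive the special
  // handling added in buildPartitionKey/BinarySortableSerDeWithEndPrefix above.
  static byte[] keyFor(Configuration conf, String db, String table,
                       List<String> partTypes, List<String> partVals) {
    String defaultPartName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);
    return HBaseUtils.buildPartitionKey(db, table, partTypes, partVals, defaultPartName);
  }
}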
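
A hypothetical usage sketch of the new mergeColumnStatisticsForOneColumn helper, showing that only the fields the update actually sets overwrite the original, while unset fields are preserved. The wrapper class is invented for illustration and placed in the same package because the helper is package-private.

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

class MergeStatsSketch {
  static void demo() {
    LongColumnStatsData origLong = new LongColumnStatsData();
    origLong.setNumNulls(5);
    origLong.setHighValue(100);

    LongColumnStatsData updatedLong = new LongColumnStatsData();
    updatedLong.setHighValue(200);                 // numNulls deliberately left unset

    ColumnStatisticsObj orig = new ColumnStatisticsObj();
    ColumnStatisticsData origData = new ColumnStatisticsData();
    origData.setLongStats(origLong);
    orig.setStatsData(origData);

    ColumnStatisticsObj updated = new ColumnStatisticsObj();
    ColumnStatisticsData updatedData = new ColumnStatisticsData();
    updatedData.setLongStats(updatedLong);
    updated.setStatsData(updatedData);

    ColumnStatisticsObj merged = HBaseUtils.mergeColumnStatisticsForOneColumn(orig, updated);
    // merged keeps numNulls == 5 from orig and takes highValue == 200 from the update
  }
}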
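
The patch also wires date column statistics end to end (Thrift DateColumnStatsData on one side, the new ColumnStats.DateStats and ColumnStats.Date protobuf messages on the other). A small sketch of populating the Thrift side; the concrete day values are made up for illustration.

import org.apache.hadoop.hive.metastore.api.Date;
import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;

class DateStatsSketch {
  static DateColumnStatsData sample() {
    // Dates travel as days since the Unix epoch, matching the required
    // int64 daysSinceEpoch field of the new protobuf Date message.
    Date low = new Date();
    low.setDaysSinceEpoch(16436);   // 2015-01-01
    Date high = new Date();
    high.setDaysSinceEpoch(16465);  // 2015-01-30

    DateColumnStatsData dateStats = new DateColumnStatsData();
    dateStats.setNumNulls(0);
    dateStats.setNumDVs(30);
    dateStats.setLowValue(low);
    dateStats.setHighValue(high);
    return dateStats;
  }
}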
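
SharedTable and SharedPartition above follow a copy-on-write pattern: shared fields are handed out as-is until a caller first reads or replaces a mutable collection, at which point a private copy is taken so the shared original stays untouched, and setReadOnly() lets callers skip the copy when they promise not to mutate. A framework-free sketch of that idea, with invented names:

import java.util.HashMap;
import java.util.Map;

class CopyOnWriteParametersSketch {
  private Map<String, String> parameters;   // initially aliases the shared map
  private boolean copied = false;

  CopyOnWriteParametersSketch(Map<String, String> shared) {
    this.parameters = shared;
  }

  Map<String, String> getParameters() {
    if (!copied) {                          // first access: take a private copy
      parameters = new HashMap<String, String>(parameters);
      copied = true;
    }
    return parameters;                      // safe for the caller to mutate
  }

  void setReadOnly() {
    copied = true;                          // caller promises read-only use; no copy made
  }
}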