Index: ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
===================================================================
--- ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java (revision 1620734)
+++ ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java (working copy)
@@ -3735,6 +3735,515 @@
// @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
}
+ public interface TimestampStatisticsOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional sint64 minimum = 1;
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ boolean hasMinimum();
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ long getMinimum();
+
+ // optional sint64 maximum = 2;
+ /**
+ * optional sint64 maximum = 2;
+ */
+ boolean hasMaximum();
+ /**
+ * optional sint64 maximum = 2;
+ */
+ long getMaximum();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics}
+ */
+ public static final class TimestampStatistics extends
+ com.google.protobuf.GeneratedMessage
+ implements TimestampStatisticsOrBuilder {
+ // Use TimestampStatistics.newBuilder() to construct.
+ private TimestampStatistics(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TimestampStatistics(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TimestampStatistics defaultInstance;
+ public static TimestampStatistics getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TimestampStatistics getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TimestampStatistics(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ minimum_ = input.readSInt64();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ maximum_ = input.readSInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TimestampStatistics> PARSER =
+ new com.google.protobuf.AbstractParser<TimestampStatistics>() {
+ public TimestampStatistics parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TimestampStatistics(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TimestampStatistics> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional sint64 minimum = 1;
+ public static final int MINIMUM_FIELD_NUMBER = 1;
+ private long minimum_;
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ public boolean hasMinimum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ public long getMinimum() {
+ return minimum_;
+ }
+
+ // optional sint64 maximum = 2;
+ public static final int MAXIMUM_FIELD_NUMBER = 2;
+ private long maximum_;
+ /**
+ * optional sint64 maximum = 2;
+ */
+ public boolean hasMaximum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional sint64 maximum = 2;
+ */
+ public long getMaximum() {
+ return maximum_;
+ }
+
+ private void initFields() {
+ minimum_ = 0L;
+ maximum_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeSInt64(1, minimum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeSInt64(2, maximum_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeSInt64Size(1, minimum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeSInt64Size(2, maximum_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder {
+
+ private int bitField0_;
+
+ // optional sint64 minimum = 1;
+ private long minimum_ ;
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ public boolean hasMinimum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ public long getMinimum() {
+ return minimum_;
+ }
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ public Builder setMinimum(long value) {
+ bitField0_ |= 0x00000001;
+ minimum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional sint64 minimum = 1;
+ *
+ * min,max values saved as milliseconds since epoch
+ */
+ public Builder clearMinimum() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ minimum_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional sint64 maximum = 2;
+ private long maximum_ ;
+ /**
+ * optional sint64 maximum = 2;
+ */
+ public boolean hasMaximum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional sint64 maximum = 2;
+ */
+ public long getMaximum() {
+ return maximum_;
+ }
+ /**
+ * optional sint64 maximum = 2;
+ */
+ public Builder setMaximum(long value) {
+ bitField0_ |= 0x00000002;
+ maximum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional sint64 maximum = 2;
+ */
+ public Builder clearMaximum() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ maximum_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics)
+ }
+
+ static {
+ defaultInstance = new TimestampStatistics(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics)
+ }
+
public interface BinaryStatisticsOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -4273,6 +4782,20 @@
* optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;
*/
org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder getBinaryStatisticsOrBuilder();
+
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ boolean hasTimestampStatistics();
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics();
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder();
}
/**
* Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.ColumnStatistics}
@@ -4421,6 +4944,19 @@
bitField0_ |= 0x00000080;
break;
}
+ case 74: {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ subBuilder = timestampStatistics_.toBuilder();
+ }
+ timestampStatistics_ = input.readMessage(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(timestampStatistics_);
+ timestampStatistics_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000100;
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4631,6 +5167,28 @@
return binaryStatistics_;
}
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ public static final int TIMESTAMPSTATISTICS_FIELD_NUMBER = 9;
+ private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_;
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public boolean hasTimestampStatistics() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() {
+ return timestampStatistics_;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() {
+ return timestampStatistics_;
+ }
+
private void initFields() {
numberOfValues_ = 0L;
intStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.IntegerStatistics.getDefaultInstance();
@@ -4640,6 +5198,7 @@
decimalStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.getDefaultInstance();
dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -4677,6 +5236,9 @@
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeMessage(8, binaryStatistics_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeMessage(9, timestampStatistics_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4718,6 +5280,10 @@
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, binaryStatistics_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(9, timestampStatistics_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4833,6 +5399,7 @@
getDecimalStatisticsFieldBuilder();
getDateStatisticsFieldBuilder();
getBinaryStatisticsFieldBuilder();
+ getTimestampStatisticsFieldBuilder();
}
}
private static Builder create() {
@@ -4885,6 +5452,12 @@
binaryStatisticsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ } else {
+ timestampStatisticsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
return this;
}
@@ -4973,6 +5546,14 @@
} else {
result.binaryStatistics_ = binaryStatisticsBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ if (timestampStatisticsBuilder_ == null) {
+ result.timestampStatistics_ = timestampStatistics_;
+ } else {
+ result.timestampStatistics_ = timestampStatisticsBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -5013,6 +5594,9 @@
if (other.hasBinaryStatistics()) {
mergeBinaryStatistics(other.getBinaryStatistics());
}
+ if (other.hasTimestampStatistics()) {
+ mergeTimestampStatistics(other.getTimestampStatistics());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -5892,6 +6476,123 @@
return binaryStatisticsBuilder_;
}
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder> timestampStatisticsBuilder_;
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public boolean hasTimestampStatistics() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() {
+ if (timestampStatisticsBuilder_ == null) {
+ return timestampStatistics_;
+ } else {
+ return timestampStatisticsBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public Builder setTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) {
+ if (timestampStatisticsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ timestampStatistics_ = value;
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public Builder setTimestampStatistics(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder builderForValue) {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = builderForValue.build();
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public Builder mergeTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) {
+ if (timestampStatisticsBuilder_ == null) {
+ if (((bitField0_ & 0x00000100) == 0x00000100) &&
+ timestampStatistics_ != org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance()) {
+ timestampStatistics_ =
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.newBuilder(timestampStatistics_).mergeFrom(value).buildPartial();
+ } else {
+ timestampStatistics_ = value;
+ }
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public Builder clearTimestampStatistics() {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
+ return this;
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder getTimestampStatisticsBuilder() {
+ bitField0_ |= 0x00000100;
+ onChanged();
+ return getTimestampStatisticsFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() {
+ if (timestampStatisticsBuilder_ != null) {
+ return timestampStatisticsBuilder_.getMessageOrBuilder();
+ } else {
+ return timestampStatistics_;
+ }
+ }
+ /**
+ * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder>
+ getTimestampStatisticsFieldBuilder() {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder>(
+ timestampStatistics_,
+ getParentForChildren(),
+ isClean());
+ timestampStatistics_ = null;
+ }
+ return timestampStatisticsBuilder_;
+ }
+
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.ColumnStatistics)
}
@@ -16654,6 +17355,11 @@
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -16742,74 +17448,78 @@
"nt\030\001 \003(\004B\002\020\001\"B\n\021DecimalStatistics\022\017\n\007min" +
"imum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t" +
"\"2\n\016DateStatistics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007m",
- "aximum\030\002 \001(\021\"\037\n\020BinaryStatistics\022\013\n\003sum\030" +
- "\001 \001(\022\"\310\004\n\020ColumnStatistics\022\026\n\016numberOfVa" +
- "lues\030\001 \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org." +
- "apache.hadoop.hive.ql.io.orc.IntegerStat" +
- "istics\022L\n\020doubleStatistics\030\003 \001(\01322.org.a" +
- "pache.hadoop.hive.ql.io.orc.DoubleStatis" +
- "tics\022L\n\020stringStatistics\030\004 \001(\01322.org.apa" +
- "che.hadoop.hive.ql.io.orc.StringStatisti" +
- "cs\022L\n\020bucketStatistics\030\005 \001(\01322.org.apach" +
- "e.hadoop.hive.ql.io.orc.BucketStatistics",
- "\022N\n\021decimalStatistics\030\006 \001(\01323.org.apache" +
- ".hadoop.hive.ql.io.orc.DecimalStatistics" +
- "\022H\n\016dateStatistics\030\007 \001(\01320.org.apache.ha" +
- "doop.hive.ql.io.orc.DateStatistics\022L\n\020bi" +
- "naryStatistics\030\010 \001(\01322.org.apache.hadoop" +
- ".hive.ql.io.orc.BinaryStatistics\"n\n\rRowI" +
- "ndexEntry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstat" +
- "istics\030\002 \001(\01322.org.apache.hadoop.hive.ql" +
- ".io.orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005" +
- "entry\030\001 \003(\0132/.org.apache.hadoop.hive.ql.",
- "io.orc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030" +
- "\001 \002(\0162-.org.apache.hadoop.hive.ql.io.orc" +
- ".Stream.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003" +
- " \001(\004\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006L" +
- "ENGTH\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONA" +
- "RY_COUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006" +
- "\"\263\001\n\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org." +
- "apache.hadoop.hive.ql.io.orc.ColumnEncod" +
- "ing.Kind\022\026\n\016dictionarySize\030\002 \001(\r\"D\n\004Kind" +
- "\022\n\n\006DIRECT\020\000\022\016\n\nDICTIONARY\020\001\022\r\n\tDIRECT_V",
- "2\020\002\022\021\n\rDICTIONARY_V2\020\003\"\214\001\n\014StripeFooter\022" +
- "9\n\007streams\030\001 \003(\0132(.org.apache.hadoop.hiv" +
- "e.ql.io.orc.Stream\022A\n\007columns\030\002 \003(\01320.or" +
- "g.apache.hadoop.hive.ql.io.orc.ColumnEnc" +
- "oding\"\370\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apach" +
- "e.hadoop.hive.ql.io.orc.Type.Kind\022\024\n\010sub" +
- "types\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\022\025\n\rm" +
- "aximumLength\030\004 \001(\r\022\021\n\tprecision\030\005 \001(\r\022\r\n" +
- "\005scale\030\006 \001(\r\"\321\001\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004BY" +
- "TE\020\001\022\t\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FL",
- "OAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022\n\n\006BINARY\020" +
- "\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006S" +
- "TRUCT\020\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DATE\020" +
- "\017\022\013\n\007VARCHAR\020\020\022\010\n\004CHAR\020\021\"x\n\021StripeInform" +
- "ation\022\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002 \001" +
- "(\004\022\022\n\ndataLength\030\003 \001(\004\022\024\n\014footerLength\030\004" +
- " \001(\004\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMetada" +
- "taItem\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"X\n\020S" +
- "tripeStatistics\022D\n\010colStats\030\001 \003(\01322.org." +
- "apache.hadoop.hive.ql.io.orc.ColumnStati",
- "stics\"S\n\010Metadata\022G\n\013stripeStats\030\001 \003(\01322" +
- ".org.apache.hadoop.hive.ql.io.orc.Stripe" +
- "Statistics\"\356\002\n\006Footer\022\024\n\014headerLength\030\001 " +
- "\001(\004\022\025\n\rcontentLength\030\002 \001(\004\022D\n\007stripes\030\003 " +
- "\003(\01323.org.apache.hadoop.hive.ql.io.orc.S" +
- "tripeInformation\0225\n\005types\030\004 \003(\0132&.org.ap" +
- "ache.hadoop.hive.ql.io.orc.Type\022D\n\010metad" +
- "ata\030\005 \003(\01322.org.apache.hadoop.hive.ql.io" +
- ".orc.UserMetadataItem\022\024\n\014numberOfRows\030\006 " +
- "\001(\004\022F\n\nstatistics\030\007 \003(\01322.org.apache.had",
- "oop.hive.ql.io.orc.ColumnStatistics\022\026\n\016r" +
- "owIndexStride\030\010 \001(\r\"\305\001\n\nPostScript\022\024\n\014fo" +
- "oterLength\030\001 \001(\004\022F\n\013compression\030\002 \001(\01621." +
- "org.apache.hadoop.hive.ql.io.orc.Compres" +
- "sionKind\022\034\n\024compressionBlockSize\030\003 \001(\004\022\023" +
- "\n\007version\030\004 \003(\rB\002\020\001\022\026\n\016metadataLength\030\005 " +
- "\001(\004\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKind\022\010" +
- "\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
+ "aximum\030\002 \001(\021\"7\n\023TimestampStatistics\022\017\n\007m" +
+ "inimum\030\001 \001(\022\022\017\n\007maximum\030\002 \001(\022\"\037\n\020BinaryS" +
+ "tatistics\022\013\n\003sum\030\001 \001(\022\"\234\005\n\020ColumnStatist" +
+ "ics\022\026\n\016numberOfValues\030\001 \001(\004\022J\n\rintStatis" +
+ "tics\030\002 \001(\01323.org.apache.hadoop.hive.ql.i" +
+ "o.orc.IntegerStatistics\022L\n\020doubleStatist" +
+ "ics\030\003 \001(\01322.org.apache.hadoop.hive.ql.io" +
+ ".orc.DoubleStatistics\022L\n\020stringStatistic" +
+ "s\030\004 \001(\01322.org.apache.hadoop.hive.ql.io.o" +
+ "rc.StringStatistics\022L\n\020bucketStatistics\030",
+ "\005 \001(\01322.org.apache.hadoop.hive.ql.io.orc" +
+ ".BucketStatistics\022N\n\021decimalStatistics\030\006" +
+ " \001(\01323.org.apache.hadoop.hive.ql.io.orc." +
+ "DecimalStatistics\022H\n\016dateStatistics\030\007 \001(" +
+ "\01320.org.apache.hadoop.hive.ql.io.orc.Dat" +
+ "eStatistics\022L\n\020binaryStatistics\030\010 \001(\01322." +
+ "org.apache.hadoop.hive.ql.io.orc.BinaryS" +
+ "tatistics\022R\n\023timestampStatistics\030\t \001(\01325" +
+ ".org.apache.hadoop.hive.ql.io.orc.Timest" +
+ "ampStatistics\"n\n\rRowIndexEntry\022\025\n\tpositi",
+ "ons\030\001 \003(\004B\002\020\001\022F\n\nstatistics\030\002 \001(\01322.org." +
+ "apache.hadoop.hive.ql.io.orc.ColumnStati" +
+ "stics\"J\n\010RowIndex\022>\n\005entry\030\001 \003(\0132/.org.a" +
+ "pache.hadoop.hive.ql.io.orc.RowIndexEntr" +
+ "y\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(\0162-.org.apache." +
+ "hadoop.hive.ql.io.orc.Stream.Kind\022\016\n\006col" +
+ "umn\030\002 \001(\r\022\016\n\006length\030\003 \001(\004\"r\n\004Kind\022\013\n\007PRE" +
+ "SENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGTH\020\002\022\023\n\017DICTIONA" +
+ "RY_DATA\020\003\022\024\n\020DICTIONARY_COUNT\020\004\022\r\n\tSECON" +
+ "DARY\020\005\022\r\n\tROW_INDEX\020\006\"\263\001\n\016ColumnEncoding",
+ "\022C\n\004kind\030\001 \002(\01625.org.apache.hadoop.hive." +
+ "ql.io.orc.ColumnEncoding.Kind\022\026\n\016diction" +
+ "arySize\030\002 \001(\r\"D\n\004Kind\022\n\n\006DIRECT\020\000\022\016\n\nDIC" +
+ "TIONARY\020\001\022\r\n\tDIRECT_V2\020\002\022\021\n\rDICTIONARY_V" +
+ "2\020\003\"\214\001\n\014StripeFooter\0229\n\007streams\030\001 \003(\0132(." +
+ "org.apache.hadoop.hive.ql.io.orc.Stream\022" +
+ "A\n\007columns\030\002 \003(\01320.org.apache.hadoop.hiv" +
+ "e.ql.io.orc.ColumnEncoding\"\370\002\n\004Type\0229\n\004k" +
+ "ind\030\001 \002(\0162+.org.apache.hadoop.hive.ql.io" +
+ ".orc.Type.Kind\022\024\n\010subtypes\030\002 \003(\rB\002\020\001\022\022\n\n",
+ "fieldNames\030\003 \003(\t\022\025\n\rmaximumLength\030\004 \001(\r\022" +
+ "\021\n\tprecision\030\005 \001(\r\022\r\n\005scale\030\006 \001(\r\"\321\001\n\004Ki" +
+ "nd\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003" +
+ "INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n" +
+ "\n\006STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n" +
+ "\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022" +
+ "\013\n\007DECIMAL\020\016\022\010\n\004DATE\020\017\022\013\n\007VARCHAR\020\020\022\010\n\004C" +
+ "HAR\020\021\"x\n\021StripeInformation\022\016\n\006offset\030\001 \001" +
+ "(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\ndataLength\030\003 " +
+ "\001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024\n\014numberOfRow",
+ "s\030\005 \001(\004\"/\n\020UserMetadataItem\022\014\n\004name\030\001 \002(" +
+ "\t\022\r\n\005value\030\002 \002(\014\"X\n\020StripeStatistics\022D\n\010" +
+ "colStats\030\001 \003(\01322.org.apache.hadoop.hive." +
+ "ql.io.orc.ColumnStatistics\"S\n\010Metadata\022G" +
+ "\n\013stripeStats\030\001 \003(\01322.org.apache.hadoop." +
+ "hive.ql.io.orc.StripeStatistics\"\356\002\n\006Foot" +
+ "er\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rcontentLengt" +
+ "h\030\002 \001(\004\022D\n\007stripes\030\003 \003(\01323.org.apache.ha" +
+ "doop.hive.ql.io.orc.StripeInformation\0225\n" +
+ "\005types\030\004 \003(\0132&.org.apache.hadoop.hive.ql",
+ ".io.orc.Type\022D\n\010metadata\030\005 \003(\01322.org.apa" +
+ "che.hadoop.hive.ql.io.orc.UserMetadataIt" +
+ "em\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatistics\030\007" +
+ " \003(\01322.org.apache.hadoop.hive.ql.io.orc." +
+ "ColumnStatistics\022\026\n\016rowIndexStride\030\010 \001(\r" +
+ "\"\305\001\n\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n" +
+ "\013compression\030\002 \001(\01621.org.apache.hadoop.h" +
+ "ive.ql.io.orc.CompressionKind\022\034\n\024compres" +
+ "sionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001" +
+ "\022\026\n\016metadataLength\030\005 \001(\004\022\016\n\005magic\030\300> \001(\t",
+ "*:\n\017CompressionKind\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022" +
+ "\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -16852,86 +17562,92 @@
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor,
new java.lang.String[] { "Minimum", "Maximum", });
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor,
+ new java.lang.String[] { "Minimum", "Maximum", });
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(7);
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor,
new java.lang.String[] { "Sum", });
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor,
- new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", });
+ new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", "TimestampStatistics", });
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor,
new java.lang.String[] { "Positions", "Statistics", });
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor,
new java.lang.String[] { "Entry", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor,
new java.lang.String[] { "Kind", "Column", "Length", });
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor,
new java.lang.String[] { "Kind", "DictionarySize", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor,
new java.lang.String[] { "Streams", "Columns", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor,
new java.lang.String[] { "Kind", "Subtypes", "FieldNames", "MaximumLength", "Precision", "Scale", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor,
new java.lang.String[] { "Offset", "IndexLength", "DataLength", "FooterLength", "NumberOfRows", });
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(17);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor,
new java.lang.String[] { "ColStats", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(18);
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor,
new java.lang.String[] { "StripeStats", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor,
new java.lang.String[] { "HeaderLength", "ContentLength", "Stripes", "Types", "Metadata", "NumberOfRows", "Statistics", "RowIndexStride", });
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor,
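For reference, a minimal sketch of how the regenerated message round-trips through the API above (the millisecond values are hypothetical examples corresponding to the timestamps used in the test query later in this patch):

    // Build, serialize, and re-parse TimestampStatistics via the generated API.
    OrcProto.TimestampStatistics ts = OrcProto.TimestampStatistics.newBuilder()
        .setMinimum(1293843661000L)   // hypothetical: 2011-01-01 01:01:01 UTC in millis
        .setMaximum(1295485261000L)   // hypothetical: 2011-01-20 01:01:01 UTC in millis
        .build();
    OrcProto.TimestampStatistics parsed =
        OrcProto.TimestampStatistics.parseFrom(ts.toByteArray());
    assert parsed.hasMinimum() && parsed.getMinimum() == 1293843661000L;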
Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java (revision 1620734)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java (working copy)
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hive.ql.io.orc;
+import java.sql.Timestamp;
+
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -716,6 +718,99 @@
}
}
+ private static final class TimestampStatisticsImpl extends ColumnStatisticsImpl
+ implements TimestampColumnStatistics {
+ private Long minimum = null;
+ private Long maximum = null;
+
+ TimestampStatisticsImpl() {
+ }
+
+ TimestampStatisticsImpl(OrcProto.ColumnStatistics stats) {
+ super(stats);
+ OrcProto.TimestampStatistics timestampStats = stats.getTimestampStatistics();
+ // min,max values are serialized/deserialized as sint64 (milliseconds since epoch)
+ if (timestampStats.hasMaximum()) {
+ maximum = timestampStats.getMaximum();
+ }
+ if (timestampStats.hasMinimum()) {
+ minimum = timestampStats.getMinimum();
+ }
+ }
+
+ @Override
+ void reset() {
+ super.reset();
+ minimum = null;
+ maximum = null;
+ }
+
+ @Override
+ void updateTimestamp(Timestamp value) {
+ if (minimum == null) {
+ minimum = value.getTime();
+ maximum = value.getTime();
+ } else if (minimum > value.getTime()) {
+ minimum = value.getTime();
+ } else if (maximum < value.getTime()) {
+ maximum = value.getTime();
+ }
+ }
+
+ @Override
+ void merge(ColumnStatisticsImpl other) {
+ super.merge(other);
+ TimestampStatisticsImpl timestampStats = (TimestampStatisticsImpl) other;
+ if (minimum == null) {
+ minimum = timestampStats.minimum;
+ maximum = timestampStats.maximum;
+ } else if (timestampStats.minimum != null) {
+ if (minimum > timestampStats.minimum) {
+ minimum = timestampStats.minimum;
+ }
+ if (maximum < timestampStats.maximum) {
+ maximum = timestampStats.maximum;
+ }
+ }
+ }
+
+ @Override
+ OrcProto.ColumnStatistics.Builder serialize() {
+ OrcProto.ColumnStatistics.Builder result = super.serialize();
+ OrcProto.TimestampStatistics.Builder timestampStats = OrcProto.TimestampStatistics
+ .newBuilder();
+ if (getNumberOfValues() != 0) {
+ timestampStats.setMinimum(minimum);
+ timestampStats.setMaximum(maximum);
+ }
+ result.setTimestampStatistics(timestampStats);
+ return result;
+ }
+
+ @Override
+ public Timestamp getMinimum() {
+ return minimum == null ? null : new Timestamp(minimum);
+ }
+
+ @Override
+ public Timestamp getMaximum() {
+ return maximum == null ? null : new Timestamp(maximum);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder buf = new StringBuilder(super.toString());
+ if (getNumberOfValues() != 0) {
+ buf.append(" min: ");
+ buf.append(minimum);
+ buf.append(" max: ");
+ buf.append(maximum);
+ }
+ return buf.toString();
+ }
+ }
+
private long count = 0;
ColumnStatisticsImpl(OrcProto.ColumnStatistics stats) {
@@ -759,6 +854,10 @@
throw new UnsupportedOperationException("Can't update date");
}
+ void updateTimestamp(Timestamp value) {
+ throw new UnsupportedOperationException("Can't update timestamp");
+ }
+
void merge(ColumnStatisticsImpl stats) {
count += stats.count;
}
@@ -806,6 +905,8 @@
return new DecimalStatisticsImpl();
case DATE:
return new DateStatisticsImpl();
+ case TIMESTAMP:
+ return new TimestampStatisticsImpl();
case BINARY:
return new BinaryStatisticsImpl();
default:
@@ -829,6 +930,8 @@
return new DecimalStatisticsImpl(stats);
} else if (stats.hasDateStatistics()) {
return new DateStatisticsImpl(stats);
+ } else if (stats.hasTimestampStatistics()) {
+ return new TimestampStatisticsImpl(stats);
} else if(stats.hasBinaryStatistics()) {
return new BinaryStatisticsImpl(stats);
} else {
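A quick sketch of the intended merge semantics (TimestampStatisticsImpl and its constructor are package-private, so this would live in the same package; the timestamps are arbitrary):

    // Merging must widen both bounds independently: here b supplies both the
    // lower minimum and the higher maximum, which is why merge() uses two
    // separate ifs rather than an else-if chain.
    TimestampStatisticsImpl a = new TimestampStatisticsImpl();
    a.updateTimestamp(Timestamp.valueOf("2011-01-05 00:00:00"));
    TimestampStatisticsImpl b = new TimestampStatisticsImpl();
    b.updateTimestamp(Timestamp.valueOf("2011-01-01 00:00:00"));
    b.updateTimestamp(Timestamp.valueOf("2011-01-20 00:00:00"));
    a.merge(b);
    assert a.getMinimum().equals(Timestamp.valueOf("2011-01-01 00:00:00"));
    assert a.getMaximum().equals(Timestamp.valueOf("2011-01-20 00:00:00"));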
Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java (revision 1620734)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java (working copy)
@@ -2251,6 +2251,8 @@
return ((DateColumnStatistics) index).getMaximum();
} else if (index instanceof DecimalColumnStatistics) {
return ((DecimalColumnStatistics) index).getMaximum();
+ } else if (index instanceof TimestampColumnStatistics) {
+ return ((TimestampColumnStatistics) index).getMaximum();
} else {
return null;
}
@@ -2273,6 +2275,8 @@
return ((DateColumnStatistics) index).getMinimum();
} else if (index instanceof DecimalColumnStatistics) {
return ((DecimalColumnStatistics) index).getMinimum();
+ } else if (index instanceof TimestampColumnStatistics) {
+ return ((TimestampColumnStatistics) index).getMinimum();
} else {
return null;
}
Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/TimestampColumnStatistics.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/TimestampColumnStatistics.java (revision 0)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/TimestampColumnStatistics.java (working copy)
@@ -0,0 +1,20 @@
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.sql.Timestamp;
+
+/**
+ * Statistics for Timestamp columns.
+ */
+public interface TimestampColumnStatistics extends ColumnStatistics {
+ /**
+ * Get the minimum value for the column.
+ * @return minimum value, or null if the column has no values
+ */
+ Timestamp getMinimum();
+
+ /**
+ * Get the maximum value for the column.
+ * @return maximum value, or null if the column has no values
+ */
+ Timestamp getMaximum();
+}
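Callers would consume the new interface by downcasting the per-column statistics obtained from a reader; a hedged sketch (reader construction abbreviated to the older two-argument form, which may differ by branch; fs and path are supplied by the caller):

    // Inspect timestamp min/max from a file's footer statistics.
    Reader reader = OrcFile.createReader(fs, path);
    for (ColumnStatistics cs : reader.getStatistics()) {
      if (cs instanceof TimestampColumnStatistics) {
        TimestampColumnStatistics tcs = (TimestampColumnStatistics) cs;
        System.out.println("min=" + tcs.getMinimum() + " max=" + tcs.getMaximum());
      }
    }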
Index: ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java (revision 1620734)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java (working copy)
@@ -1317,6 +1317,7 @@
Timestamp val =
((TimestampObjectInspector) inspector).
getPrimitiveJavaObject(obj);
+ indexStatistics.updateTimestamp(val);
seconds.write((val.getTime() / MILLIS_PER_SECOND) - BASE_TIMESTAMP);
nanos.write(formatNanos(val.getNanos()));
}
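Note that the statistics track only Timestamp.getTime() (milliseconds), while the seconds/nanos data streams keep full nanosecond precision, so two values differing only below the millisecond are indistinguishable to the index. A sketch of the caveat:

    // Nanosecond precision is not reflected in the min/max statistics:
    Timestamp t1 = Timestamp.valueOf("2011-01-01 01:01:01.000000001");
    Timestamp t2 = Timestamp.valueOf("2011-01-01 01:01:01.000000002");
    assert t1.getTime() == t2.getTime();  // same millisecond, same stats bounds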
Index: ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java (revision 1620734)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java (working copy)
@@ -47,7 +47,8 @@
FLOAT, // float and double
STRING, // string, char, varchar
DATE,
- DECIMAL
+ DECIMAL,
+ TIMESTAMP
}
/**
Index: ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java (revision 1620734)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java (working copy)
@@ -21,6 +21,7 @@
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
+
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.common.type.HiveChar;
@@ -50,6 +51,7 @@
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import java.math.BigDecimal;
+import java.sql.Timestamp;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
@@ -107,6 +109,12 @@
@Override
public Object getLiteral() {
+ // Work around a kryo 2.22 bug that deserializes a Timestamp into a Date
+ // (https://github.com/EsotericSoftware/kryo/issues/88):
+ // when we see a Date, convert it back into a Timestamp.
+ if (literal instanceof java.util.Date) {
+ return new Timestamp(((java.util.Date)literal).getTime());
+ }
return literal;
}
@@ -317,6 +325,8 @@
return PredicateLeaf.Type.FLOAT;
case DATE:
return PredicateLeaf.Type.DATE;
+ case TIMESTAMP:
+ return PredicateLeaf.Type.TIMESTAMP;
case DECIMAL:
return PredicateLeaf.Type.DECIMAL;
default:
@@ -354,6 +364,7 @@
case FLOAT:
return ((Number) lit.getValue()).doubleValue();
case DATE:
+ case TIMESTAMP:
case DECIMAL:
return lit;
default:
@@ -948,6 +959,7 @@
literal instanceof Long ||
literal instanceof Double ||
literal instanceof DateWritable ||
+ literal instanceof Timestamp ||
literal instanceof HiveDecimal ||
literal instanceof BigDecimal) {
return literal;
@@ -981,7 +993,9 @@
return PredicateLeaf.Type.FLOAT;
} else if (literal instanceof DateWritable) {
return PredicateLeaf.Type.DATE;
- } else if (literal instanceof HiveDecimal ||
+ } else if (literal instanceof Timestamp) {
+ return PredicateLeaf.Type.TIMESTAMP;
+ } else if (literal instanceof HiveDecimal ||
literal instanceof BigDecimal) {
return PredicateLeaf.Type.DECIMAL;
}
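The getLiteral() guard above matters because a SearchArgument serialized with kryo 2.22 can hand a Timestamp literal back as a plain java.util.Date; a sketch of the normalization in isolation (the sample millisecond value is hypothetical):

    // What kryo 2.22 may return for a serialized Timestamp literal:
    Object literal = new java.util.Date(1293843661000L);
    Object normalized = (literal instanceof java.util.Date)
        ? new java.sql.Timestamp(((java.util.Date) literal).getTime())
        : literal;
    assert normalized instanceof java.sql.Timestamp;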
Index: ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto
===================================================================
--- ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto (revision 1620734)
+++ ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto (working copy)
@@ -53,6 +53,12 @@
optional sint32 maximum = 2;
}
+message TimestampStatistics {
+ // min,max values saved as milliseconds since epoch
+ optional sint64 minimum = 1;
+ optional sint64 maximum = 2;
+}
+
message BinaryStatistics {
// sum will store the total binary blob length in a stripe
optional sint64 sum = 1;
@@ -67,6 +73,7 @@
optional DecimalStatistics decimalStatistics = 6;
optional DateStatistics dateStatistics = 7;
optional BinaryStatistics binaryStatistics = 8;
+ optional TimestampStatistics timestampStatistics = 9;
}
message RowIndexEntry {
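sint64 zigzag-encodes its varint, which keeps pre-epoch (negative) millisecond values as compact on the wire as positive ones; a small check of the standard transform:

    // Zigzag maps small negative values to small unsigned varints:
    long millis = -1L;                            // 1 ms before the epoch
    long zigzag = (millis << 1) ^ (millis >> 63); // standard sint64 transform
    assert zigzag == 1L;                          // fits in one varint byte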
Index: ql/src/test/queries/clientpositive/orc_ppd_timestamp.q
===================================================================
--- ql/src/test/queries/clientpositive/orc_ppd_timestamp.q (revision 0)
+++ ql/src/test/queries/clientpositive/orc_ppd_timestamp.q (working copy)
@@ -0,0 +1,97 @@
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SET mapred.min.split.size=1000;
+SET mapred.max.split.size=5000;
+
+create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as orc tblproperties("orc.stripe.size"="16777216");
+
+insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl;
+
+-- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
+select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20));
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20));
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where ts