diff --git ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
index e3ab3ac..f4cc240 100644
--- ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
+++ ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
@@ -10603,6 +10603,21 @@ public Builder clearDictionarySize() {
*/
org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnEncodingOrBuilder getColumnsOrBuilder(
int index);
+
+ // optional string writerTimezone = 3;
+ /**
+ * optional string writerTimezone = 3;
+ */
+ boolean hasWriterTimezone();
+ /**
+ * optional string writerTimezone = 3;
+ */
+ java.lang.String getWriterTimezone();
+ /**
+ * optional string writerTimezone = 3;
+ */
+ com.google.protobuf.ByteString
+ getWriterTimezoneBytes();
}
/**
* Protobuf type {@code orc.proto.StripeFooter}
@@ -10671,6 +10686,11 @@ private StripeFooter(
columns_.add(input.readMessage(org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnEncoding.PARSER, extensionRegistry));
break;
}
+ case 26: {
+ bitField0_ |= 0x00000001;
+ writerTimezone_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -10716,6 +10736,7 @@ public StripeFooter parsePartialFrom(
return PARSER;
}
+ private int bitField0_;
// repeated .orc.proto.Stream streams = 1;
public static final int STREAMS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hive.ql.io.orc.OrcProto.Stream> streams_;
@@ -10788,9 +10809,53 @@ public int getColumnsCount() {
return columns_.get(index);
}
+ // optional string writerTimezone = 3;
+ public static final int WRITERTIMEZONE_FIELD_NUMBER = 3;
+ private java.lang.Object writerTimezone_;
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public boolean hasWriterTimezone() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public java.lang.String getWriterTimezone() {
+ java.lang.Object ref = writerTimezone_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ writerTimezone_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public com.google.protobuf.ByteString
+ getWriterTimezoneBytes() {
+ java.lang.Object ref = writerTimezone_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ writerTimezone_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
private void initFields() {
streams_ = java.util.Collections.emptyList();
columns_ = java.util.Collections.emptyList();
+ writerTimezone_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -10810,6 +10875,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
for (int i = 0; i < columns_.size(); i++) {
output.writeMessage(2, columns_.get(i));
}
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(3, getWriterTimezoneBytes());
+ }
getUnknownFields().writeTo(output);
}
@@ -10827,6 +10895,10 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, columns_.get(i));
}
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getWriterTimezoneBytes());
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -10957,6 +11029,8 @@ public Builder clear() {
} else {
columnsBuilder_.clear();
}
+ writerTimezone_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -10984,6 +11058,7 @@ public Builder clone() {
public org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter buildPartial() {
org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter(this);
int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
if (streamsBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
streams_ = java.util.Collections.unmodifiableList(streams_);
@@ -11002,6 +11077,11 @@ public Builder clone() {
} else {
result.columns_ = columnsBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.writerTimezone_ = writerTimezone_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@@ -11069,6 +11149,11 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter
}
}
}
+ if (other.hasWriterTimezone()) {
+ bitField0_ |= 0x00000004;
+ writerTimezone_ = other.writerTimezone_;
+ onChanged();
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -11576,6 +11661,80 @@ public Builder removeColumns(int index) {
return columnsBuilder_;
}
+ // optional string writerTimezone = 3;
+ private java.lang.Object writerTimezone_ = "";
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public boolean hasWriterTimezone() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public java.lang.String getWriterTimezone() {
+ java.lang.Object ref = writerTimezone_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ writerTimezone_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public com.google.protobuf.ByteString
+ getWriterTimezoneBytes() {
+ java.lang.Object ref = writerTimezone_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ writerTimezone_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public Builder setWriterTimezone(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ writerTimezone_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public Builder clearWriterTimezone() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ writerTimezone_ = getDefaultInstance().getWriterTimezone();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string writerTimezone = 3;
+ */
+ public Builder setWriterTimezoneBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ writerTimezone_ = value;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:orc.proto.StripeFooter)
}
@@ -18921,40 +19080,41 @@ public Builder setMagicBytes(
"ng\022,\n\004kind\030\001 \001(\0162\036.orc.proto.ColumnEncod" +
"ing.Kind\022\026\n\016dictionarySize\030\002 \001(\r\"D\n\004Kind" +
"\022\n\n\006DIRECT\020\000\022\016\n\nDICTIONARY\020\001\022\r\n\tDIRECT_V",
- "2\020\002\022\021\n\rDICTIONARY_V2\020\003\"^\n\014StripeFooter\022\"" +
+ "2\020\002\022\021\n\rDICTIONARY_V2\020\003\"v\n\014StripeFooter\022\"" +
"\n\007streams\030\001 \003(\0132\021.orc.proto.Stream\022*\n\007co" +
- "lumns\030\002 \003(\0132\031.orc.proto.ColumnEncoding\"\341" +
- "\002\n\004Type\022\"\n\004kind\030\001 \001(\0162\024.orc.proto.Type.K" +
- "ind\022\024\n\010subtypes\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030" +
- "\003 \003(\t\022\025\n\rmaximumLength\030\004 \001(\r\022\021\n\tprecisio" +
- "n\030\005 \001(\r\022\r\n\005scale\030\006 \001(\r\"\321\001\n\004Kind\022\013\n\007BOOLE" +
- "AN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LO" +
- "NG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022" +
- "\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003",
- "MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020" +
- "\016\022\010\n\004DATE\020\017\022\013\n\007VARCHAR\020\020\022\010\n\004CHAR\020\021\"x\n\021St" +
- "ripeInformation\022\016\n\006offset\030\001 \001(\004\022\023\n\013index" +
- "Length\030\002 \001(\004\022\022\n\ndataLength\030\003 \001(\004\022\024\n\014foot" +
- "erLength\030\004 \001(\004\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020" +
- "UserMetadataItem\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030" +
- "\002 \001(\014\"A\n\020StripeStatistics\022-\n\010colStats\030\001 " +
- "\003(\0132\033.orc.proto.ColumnStatistics\"<\n\010Meta" +
- "data\0220\n\013stripeStats\030\001 \003(\0132\033.orc.proto.St" +
- "ripeStatistics\"\222\002\n\006Footer\022\024\n\014headerLengt",
- "h\030\001 \001(\004\022\025\n\rcontentLength\030\002 \001(\004\022-\n\007stripe" +
- "s\030\003 \003(\0132\034.orc.proto.StripeInformation\022\036\n" +
- "\005types\030\004 \003(\0132\017.orc.proto.Type\022-\n\010metadat" +
- "a\030\005 \003(\0132\033.orc.proto.UserMetadataItem\022\024\n\014" +
- "numberOfRows\030\006 \001(\004\022/\n\nstatistics\030\007 \003(\0132\033" +
- ".orc.proto.ColumnStatistics\022\026\n\016rowIndexS" +
- "tride\030\010 \001(\r\"\305\001\n\nPostScript\022\024\n\014footerLeng" +
- "th\030\001 \001(\004\022/\n\013compression\030\002 \001(\0162\032.orc.prot" +
- "o.CompressionKind\022\034\n\024compressionBlockSiz" +
- "e\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001\022\026\n\016metadata",
- "Length\030\005 \001(\004\022\025\n\rwriterVersion\030\006 \001(\r\022\016\n\005m" +
- "agic\030\300> \001(\t*:\n\017CompressionKind\022\010\n\004NONE\020\000" +
- "\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003B\"\n org.a" +
- "pache.hadoop.hive.ql.io.orc"
+ "lumns\030\002 \003(\0132\031.orc.proto.ColumnEncoding\022\026" +
+ "\n\016writerTimezone\030\003 \001(\t\"\341\002\n\004Type\022\"\n\004kind\030" +
+ "\001 \001(\0162\024.orc.proto.Type.Kind\022\024\n\010subtypes\030" +
+ "\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\022\025\n\rmaximum" +
+ "Length\030\004 \001(\r\022\021\n\tprecision\030\005 \001(\r\022\r\n\005scale" +
+ "\030\006 \001(\r\"\321\001\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t" +
+ "\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022" +
+ "\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tT",
+ "IMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020" +
+ "\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DATE\020\017\022\013\n\007V" +
+ "ARCHAR\020\020\022\010\n\004CHAR\020\021\"x\n\021StripeInformation\022" +
+ "\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\n" +
+ "dataLength\030\003 \001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024" +
+ "\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMetadataItem" +
+ "\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\014\"A\n\020StripeS" +
+ "tatistics\022-\n\010colStats\030\001 \003(\0132\033.orc.proto." +
+ "ColumnStatistics\"<\n\010Metadata\0220\n\013stripeSt" +
+ "ats\030\001 \003(\0132\033.orc.proto.StripeStatistics\"\222",
+ "\002\n\006Footer\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rconte" +
+ "ntLength\030\002 \001(\004\022-\n\007stripes\030\003 \003(\0132\034.orc.pr" +
+ "oto.StripeInformation\022\036\n\005types\030\004 \003(\0132\017.o" +
+ "rc.proto.Type\022-\n\010metadata\030\005 \003(\0132\033.orc.pr" +
+ "oto.UserMetadataItem\022\024\n\014numberOfRows\030\006 \001" +
+ "(\004\022/\n\nstatistics\030\007 \003(\0132\033.orc.proto.Colum" +
+ "nStatistics\022\026\n\016rowIndexStride\030\010 \001(\r\"\305\001\n\n" +
+ "PostScript\022\024\n\014footerLength\030\001 \001(\004\022/\n\013comp" +
+ "ression\030\002 \001(\0162\032.orc.proto.CompressionKin" +
+ "d\022\034\n\024compressionBlockSize\030\003 \001(\004\022\023\n\007versi",
+ "on\030\004 \003(\rB\002\020\001\022\026\n\016metadataLength\030\005 \001(\004\022\025\n\r" +
+ "writerVersion\030\006 \001(\r\022\016\n\005magic\030\300> \001(\t*:\n\017C" +
+ "ompressionKind\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SN" +
+ "APPY\020\002\022\007\n\003LZO\020\003B\"\n org.apache.hadoop.hiv" +
+ "e.ql.io.orc"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -19056,7 +19216,7 @@ public Builder setMagicBytes(
internal_static_orc_proto_StripeFooter_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_orc_proto_StripeFooter_descriptor,
- new java.lang.String[] { "Streams", "Columns", });
+ new java.lang.String[] { "Streams", "Columns", "WriterTimezone", });
internal_static_orc_proto_Type_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_orc_proto_Type_fieldAccessorTable = new
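Note (not part of the patch): a minimal sketch of how the regenerated StripeFooter accessors above are meant to be used, assuming the generated OrcProto classes are on the classpath; the zone ID is illustrative.

import org.apache.hadoop.hive.ql.io.orc.OrcProto;

public class WriterTimezoneFieldSketch {
  public static void main(String[] args) {
    // Build a StripeFooter carrying the new optional field #3.
    OrcProto.StripeFooter footer = OrcProto.StripeFooter.newBuilder()
        .setWriterTimezone("UTC")
        .build();
    // Presence can be tested before use; footers written by old writers simply
    // report hasWriterTimezone() == false.
    System.out.println(footer.hasWriterTimezone() + " -> " + footer.getWriterTimezone());
  }
}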
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
index 9788c16..55770fd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
@@ -50,7 +50,7 @@
* A tool for printing out the file structure of ORC files.
*/
public final class FileDump {
- private static final String ROWINDEX_PREFIX = "--rowindex=";
+ private static final String UNKNOWN = "UNKNOWN";
// not used
private FileDump() {}
@@ -77,9 +77,13 @@ public static void main(String[] args) throws Exception {
}
}
+ boolean printTimeZone = false;
+ if (cli.hasOption('t')) {
+ printTimeZone = true;
+ }
String[] files = cli.getArgs();
if (dumpData) printData(Arrays.asList(files), conf);
- else printMetaData(Arrays.asList(files), conf, rowIndexCols);
+ else printMetaData(Arrays.asList(files), conf, rowIndexCols, printTimeZone);
}
private static void printData(List<String> files, Configuration conf) throws IOException,
@@ -90,7 +94,7 @@ private static void printData(List<String> files, Configuration conf) throws IOE
}
private static void printMetaData(List<String> files, Configuration conf,
- List<Integer> rowIndexCols) throws IOException {
+ List<Integer> rowIndexCols, boolean printTimeZone) throws IOException {
for (String filename : files) {
System.out.println("Structure for " + filename);
Path path = new Path(filename);
@@ -125,11 +129,19 @@ private static void printMetaData(List<String> files, Configuration conf,
for (StripeInformation stripe : reader.getStripes()) {
++stripeIx;
long stripeStart = stripe.getOffset();
- System.out.println(" Stripe: " + stripe.toString());
OrcProto.StripeFooter footer = rows.readStripeFooter(stripe);
+ if (printTimeZone) {
+ String tz = footer.getWriterTimezone();
+ if (tz == null || tz.isEmpty()) {
+ tz = UNKNOWN;
+ }
+ System.out.println(" Stripe: " + stripe.toString() + " timezone: " + tz);
+ } else {
+ System.out.println(" Stripe: " + stripe.toString());
+ }
long sectionStart = stripeStart;
for(OrcProto.Stream section: footer.getStreamsList()) {
- String kind = section.hasKind() ? section.getKind().name() : "UNKNOWN";
+ String kind = section.hasKind() ? section.getKind().name() : UNKNOWN;
System.out.println(" Stream: column " + section.getColumn() +
" section " + kind + " start: " + sectionStart +
" length " + section.getLength());
@@ -278,6 +290,13 @@ static Options createOptions() {
.withDescription("Should the data be printed")
.create('d'));
+ // Printing of the writer's time zone is optional so that file dump unit
+ // tests do not break when they are run in different time zones.
+ result.addOption(OptionBuilder
+ .withLongOpt("timezone")
+ .withDescription("Print writer's time zone")
+ .create('t'));
+
result.addOption(OptionBuilder
.withLongOpt("help")
.withDescription("print help message")
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index 458ad21..6d06df5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -26,12 +26,15 @@
import java.nio.ByteBuffer;
import java.sql.Date;
import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import java.util.TimeZone;
import java.util.TreeMap;
import org.apache.commons.lang.StringUtils;
@@ -266,7 +269,7 @@ static int findColumns(String[] columnNames,
int bufferSize,
long strideRate,
Configuration conf
- ) throws IOException {
+ ) throws IOException {
this.path = path;
this.file = fileSystem.open(path);
this.codec = codec;
@@ -376,9 +379,9 @@ IntegerReader createIntegerReader(OrcProto.ColumnEncoding.Kind kind,
}
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encoding
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- checkEncoding(encoding.get(columnId));
+ checkEncoding(stripeFooter.getColumnsList().get(columnId));
InStream in = streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.PRESENT));
if (in == null) {
@@ -465,9 +468,9 @@ Object nextVector(Object previousVector, long batchSize) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
reader = new BitFieldReader(streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.DATA)), 1);
}
@@ -525,9 +528,9 @@ Object nextVector(Object previousVector, long batchSize) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
reader = new RunLengthByteReader(streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.DATA)));
}
@@ -594,12 +597,13 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
- reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true);
+ reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+ streams.get(name), true);
}
@Override
@@ -664,12 +668,13 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
- reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true);
+ reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+ streams.get(name), true);
}
@Override
@@ -734,12 +739,13 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
- reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true);
+ reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+ streams.get(name), true);
}
@Override
@@ -797,9 +803,9 @@ void skipRows(long items) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
stream = streams.get(name);
@@ -879,9 +885,9 @@ void skipRows(long items) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name =
new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
@@ -970,14 +976,14 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
stream = streams.get(name);
- lengths = createIntegerReader(encodings.get(columnId).getKind(), streams.get(new
- StreamName(columnId, OrcProto.Stream.Kind.LENGTH)), false);
+ lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+ streams.get(new StreamName(columnId, OrcProto.Stream.Kind.LENGTH)), false);
}
@Override
@@ -1042,10 +1048,18 @@ void skipRows(long items) throws IOException {
private static class TimestampTreeReader extends TreeReader{
private IntegerReader data = null;
private IntegerReader nanos = null;
- private final LongColumnVector nanoVector = new LongColumnVector();
+ private Map<String, Long> baseTimestampMap;
+ private long base_timestamp;
+ private TimeZone readerTimeZone;
+ private TimeZone writerTimeZone;
- TimestampTreeReader(Path path, int columnId, Configuration conf) {
+ TimestampTreeReader(Path path, int columnId, Configuration conf)
+ throws IOException {
super(path, columnId, conf);
+ this.baseTimestampMap = new HashMap<>();
+ this.readerTimeZone = TimeZone.getDefault();
+ this.writerTimeZone = readerTimeZone;
+ this.base_timestamp = getBaseTimestamp(readerTimeZone.getID());
}
@Override
@@ -1059,15 +1073,41 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
- data = createIntegerReader(encodings.get(columnId).getKind(),
+ super.startStripe(streams, stripeFooter);
+ data = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.DATA)), true);
- nanos = createIntegerReader(encodings.get(columnId).getKind(),
+ nanos = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.SECONDARY)), false);
+ base_timestamp = getBaseTimestamp(stripeFooter.getWriterTimezone());
+ }
+
+ private long getBaseTimestamp(String timeZoneId) throws IOException {
+ // Old files carry no writer time zone; fall back to the reader's zone so they are read the same way as before.
+ if (timeZoneId == null || timeZoneId.isEmpty()) {
+ timeZoneId = readerTimeZone.getID();
+ }
+
+ if (!baseTimestampMap.containsKey(timeZoneId)) {
+ writerTimeZone = TimeZone.getTimeZone(timeZoneId);
+ SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ sdf.setTimeZone(writerTimeZone);
+ try {
+ long epoch =
+ sdf.parse(WriterImpl.BASE_TIMESTAMP_STRING).getTime() / WriterImpl.MILLIS_PER_SECOND;
+ baseTimestampMap.put(timeZoneId, epoch);
+ return epoch;
+ } catch (ParseException e) {
+ throw new IOException("Unable to create base timestamp", e);
+ } finally {
+ sdf.setTimeZone(readerTimeZone);
+ }
+ }
+
+ return baseTimestampMap.get(timeZoneId);
}
@Override
@@ -1087,9 +1127,7 @@ Object next(Object previous) throws IOException {
} else {
result = (TimestampWritable) previous;
}
- Timestamp ts = new Timestamp(0);
- long millis = (data.next() + WriterImpl.BASE_TIMESTAMP) *
- WriterImpl.MILLIS_PER_SECOND;
+ long millis = (data.next() + base_timestamp) * WriterImpl.MILLIS_PER_SECOND;
int newNanos = parseNanos(nanos.next());
// fix the rounding when we divided by 1000.
if (millis >= 0) {
@@ -1097,7 +1135,12 @@ Object next(Object previous) throws IOException {
} else {
millis -= newNanos / 1000000;
}
- ts.setTime(millis);
+ // Adjust for the time-zone difference between reader and writer. This also
+ // accounts for daylight saving time. The offset is 0 when a new reader reads
+ // old files, since the read and write time zones are then the same.
+ long offset = writerTimeZone.getOffset(millis) - readerTimeZone.getOffset(millis);
+ long adjustedMillis = millis + offset;
+ Timestamp ts = new Timestamp(adjustedMillis);
ts.setNanos(newNanos);
result.set(ts);
}
@@ -1167,12 +1210,13 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
- reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true);
+ reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+ streams.get(name), true);
}
@Override
@@ -1244,13 +1288,13 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
valueStream = streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.DATA));
- scaleStream = createIntegerReader(encodings.get(columnId).getKind(), streams.get(
- new StreamName(columnId, OrcProto.Stream.Kind.SECONDARY)), true);
+ scaleStream = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+ streams.get(new StreamName(columnId, OrcProto.Stream.Kind.SECONDARY)), true);
}
@Override
@@ -1349,11 +1393,11 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
// For each stripe, checks the encoding and initializes the appropriate
// reader
- switch (encodings.get(columnId).getKind()) {
+ switch (stripeFooter.getColumnsList().get(columnId).getKind()) {
case DIRECT:
case DIRECT_V2:
reader = new StringDirectTreeReader(path, columnId, conf);
@@ -1364,9 +1408,9 @@ void startStripe(Map<StreamName, InStream> streams,
break;
default:
throw new IllegalArgumentException("Unsupported encoding " +
- encodings.get(columnId).getKind());
+ stripeFooter.getColumnsList().get(columnId).getKind());
}
- reader.startStripe(streams, encodings);
+ reader.startStripe(streams, stripeFooter);
}
@Override
@@ -1486,13 +1530,13 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
stream = streams.get(name);
- lengths = createIntegerReader(encodings.get(columnId).getKind(),
+ lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
streams.get(new StreamName(columnId, OrcProto.Stream.Kind.LENGTH)),
false);
}
@@ -1585,12 +1629,12 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
// read the dictionary blob
- int dictionarySize = encodings.get(columnId).getDictionarySize();
+ int dictionarySize = stripeFooter.getColumnsList().get(columnId).getDictionarySize();
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DICTIONARY_DATA);
InStream in = streams.get(name);
@@ -1610,7 +1654,7 @@ void startStripe(Map<StreamName, InStream> streams,
name = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH);
in = streams.get(name);
if (in != null) { // Guard against empty LENGTH stream.
- IntegerReader lenReader = createIntegerReader(encodings.get(columnId)
+ IntegerReader lenReader = createIntegerReader(stripeFooter.getColumnsList().get(columnId)
.getKind(), in, false);
int offset = 0;
if (dictionaryOffsets == null ||
@@ -1627,7 +1671,7 @@ void startStripe(Map<StreamName, InStream> streams,
// set up the row reader
name = new StreamName(columnId, OrcProto.Stream.Kind.DATA);
- reader = createIntegerReader(encodings.get(columnId).getKind(),
+ reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
streams.get(name), false);
}
@@ -1869,8 +1913,8 @@ Object nextVector(Object previousVector, long batchSize) throws IOException {
private final String[] fieldNames;
StructTreeReader(Path path, int columnId,
- List<OrcProto.Type> types,
- boolean[] included, Configuration conf) throws IOException {
+ List<OrcProto.Type> types,
+ boolean[] included, Configuration conf) throws IOException {
super(path, columnId, conf);
OrcProto.Type type = types.get(columnId);
int fieldCount = type.getFieldNamesCount();
@@ -1945,12 +1989,12 @@ Object nextVector(Object previousVector, long batchSize) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
for(TreeReader field: fields) {
if (field != null) {
- field.startStripe(streams, encodings);
+ field.startStripe(streams, stripeFooter);
}
}
}
@@ -1971,8 +2015,9 @@ void skipRows(long items) throws IOException {
private RunLengthByteReader tags;
UnionTreeReader(Path path, int columnId,
- List<OrcProto.Type> types,
- boolean[] included, Configuration conf) throws IOException {
+ List<OrcProto.Type> types,
+ boolean[] included,
+ Configuration conf) throws IOException {
super(path, columnId, conf);
OrcProto.Type type = types.get(columnId);
int fieldCount = type.getSubtypesCount();
@@ -2020,14 +2065,14 @@ Object nextVector(Object previousVector, long batchSize) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
+ super.startStripe(streams, stripeFooter);
tags = new RunLengthByteReader(streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.DATA)));
for(TreeReader field: fields) {
if (field != null) {
- field.startStripe(streams, encodings);
+ field.startStripe(streams, stripeFooter);
}
}
}
@@ -2050,8 +2095,9 @@ void skipRows(long items) throws IOException {
private IntegerReader lengths = null;
ListTreeReader(Path path, int columnId,
- List<OrcProto.Type> types,
- boolean[] included, Configuration conf) throws IOException {
+ List<OrcProto.Type> types,
+ boolean[] included,
+ Configuration conf) throws IOException {
super(path, columnId, conf);
OrcProto.Type type = types.get(columnId);
elementReader = createTreeReader(path, type.getSubtypes(0), types,
@@ -2112,14 +2158,14 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
- lengths = createIntegerReader(encodings.get(columnId).getKind(),
+ super.startStripe(streams, stripeFooter);
+ lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.LENGTH)), false);
if (elementReader != null) {
- elementReader.startStripe(streams, encodings);
+ elementReader.startStripe(streams, stripeFooter);
}
}
@@ -2140,9 +2186,10 @@ void skipRows(long items) throws IOException {
private IntegerReader lengths = null;
MapTreeReader(Path path,
- int columnId,
- List<OrcProto.Type> types,
- boolean[] included, Configuration conf) throws IOException {
+ int columnId,
+ List<OrcProto.Type> types,
+ boolean[] included,
+ Configuration conf) throws IOException {
super(path, columnId, conf);
OrcProto.Type type = types.get(columnId);
int keyColumn = type.getSubtypes(0);
@@ -2206,17 +2253,17 @@ void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
@Override
void startStripe(Map<StreamName, InStream> streams,
- List<OrcProto.ColumnEncoding> encodings
+ OrcProto.StripeFooter stripeFooter
) throws IOException {
- super.startStripe(streams, encodings);
- lengths = createIntegerReader(encodings.get(columnId).getKind(),
+ super.startStripe(streams, stripeFooter);
+ lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
streams.get(new StreamName(columnId,
OrcProto.Stream.Kind.LENGTH)), false);
if (keyReader != null) {
- keyReader.startStripe(streams, encodings);
+ keyReader.startStripe(streams, stripeFooter);
}
if (valueReader != null) {
- valueReader.startStripe(streams, encodings);
+ valueReader.startStripe(streams, stripeFooter);
}
}
@@ -2233,11 +2280,10 @@ void skipRows(long items) throws IOException {
}
private static TreeReader createTreeReader(Path path,
- int columnId,
- List<OrcProto.Type> types,
- boolean[] included,
- Configuration conf
- ) throws IOException {
+ int columnId,
+ List<OrcProto.Type> types,
+ boolean[] included,
+ Configuration conf) throws IOException {
OrcProto.Type type = types.get(columnId);
switch (type.getKind()) {
case BOOLEAN:
@@ -2799,7 +2845,7 @@ private void readStripe() throws IOException {
} else {
readPartialDataStreams(stripe);
}
- reader.startStripe(streams, stripeFooter.getColumnsList());
+ reader.startStripe(streams, stripeFooter);
// if we skipped the first row group, move the pointers forward
if (rowInStripe != 0) {
seekToRowEntry((int) (rowInStripe / rowIndexStride));
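For clarity (a standalone sketch, not part of the patch): the reader logic above first derives the writer's base epoch by evaluating "2015-01-01 00:00:00" in the recorded writer zone, then shifts the reconstructed millis by the difference between the writer's and reader's UTC offsets. The zone IDs and the stored value below are made up.

import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.TimeZone;

public class TimestampAdjustSketch {
  public static void main(String[] args) throws Exception {
    TimeZone writerZone = TimeZone.getTimeZone("America/Los_Angeles"); // from writerTimezone
    TimeZone readerZone = TimeZone.getDefault();

    // Base epoch of the shared reference instant, evaluated in the writer's zone.
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    sdf.setTimeZone(writerZone);
    long writerBaseSeconds = sdf.parse("2015-01-01 00:00:00").getTime() / 1000L;

    long storedSeconds = 5000000L; // value as read from the DATA stream
    long millis = (storedSeconds + writerBaseSeconds) * 1000L;

    // Zero when the zones match, e.g. old files that carry no writer zone.
    long offset = writerZone.getOffset(millis) - readerZone.getOffset(millis);
    System.out.println(new Timestamp(millis + offset));
  }
}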
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
index 79dc5a1..918cb28 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
@@ -26,10 +26,10 @@
import java.nio.ByteBuffer;
import java.sql.Timestamp;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
+import java.util.TimeZone;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
@@ -796,6 +796,7 @@ void writeStripe(OrcProto.StripeFooter.Builder builder,
foundNulls = false;
builder.addColumns(getEncoding());
+ builder.setWriterTimezone(TimeZone.getDefault().getID());
if (rowIndexStream != null) {
if (rowIndex.getEntryCount() != requiredIndexEntries) {
throw new IllegalArgumentException("Column has wrong number of " +
@@ -1511,13 +1512,13 @@ void recordPosition(PositionRecorder recorder) throws IOException {
}
static final int MILLIS_PER_SECOND = 1000;
- static final long BASE_TIMESTAMP =
- Timestamp.valueOf("2015-01-01 00:00:00").getTime() / MILLIS_PER_SECOND;
+ static final String BASE_TIMESTAMP_STRING = "2015-01-01 00:00:00";
private static class TimestampTreeWriter extends TreeWriter {
private final IntegerWriter seconds;
private final IntegerWriter nanos;
private final boolean isDirectV2;
+ private final long base_timestamp;
TimestampTreeWriter(int columnId,
ObjectInspector inspector,
@@ -1530,6 +1531,8 @@ void recordPosition(PositionRecorder recorder) throws IOException {
this.nanos = createIntegerWriter(writer.createStream(id,
OrcProto.Stream.Kind.SECONDARY), false, isDirectV2, writer);
recordPosition(rowIndexPosition);
+ // Computed per writer so unit tests can set a different default time zone for each writer.
+ this.base_timestamp = Timestamp.valueOf(BASE_TIMESTAMP_STRING).getTime() / MILLIS_PER_SECOND;
}
@Override
@@ -1550,7 +1553,7 @@ void write(Object obj) throws IOException {
((TimestampObjectInspector) inspector).
getPrimitiveJavaObject(obj);
indexStatistics.updateTimestamp(val);
- seconds.write((val.getTime() / MILLIS_PER_SECOND) - BASE_TIMESTAMP);
+ seconds.write((val.getTime() / MILLIS_PER_SECOND) - base_timestamp);
nanos.write(formatNanos(val.getNanos()));
if (createBloomFilter) {
bloomFilter.addLong(val.getTime());
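On the write path (again a sketch under the same assumptions, not the patch itself): seconds are stored relative to a base timestamp evaluated in the writer's default zone, and that zone's ID is what the stripe footer records.

import java.sql.Timestamp;
import java.util.TimeZone;

public class TimestampWriteSketch {
  public static void main(String[] args) {
    // Timestamp.valueOf interprets the string in the JVM's default time zone.
    long baseSeconds = Timestamp.valueOf("2015-01-01 00:00:00").getTime() / 1000L;
    Timestamp val = Timestamp.valueOf("2015-03-01 12:34:56.789");

    long storedSeconds = val.getTime() / 1000L - baseSeconds; // written to the DATA stream
    String stripeZone = TimeZone.getDefault().getID();        // recorded as writerTimezone
    System.out.println(storedSeconds + " @ " + stripeZone);
  }
}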
diff --git ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto
index 6d5f482..3b7a9b3 100644
--- ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto
+++ ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto
@@ -129,6 +129,7 @@ message ColumnEncoding {
message StripeFooter {
repeated Stream streams = 1;
repeated ColumnEncoding columns = 2;
+ optional string writerTimezone = 3;
}
message Type {
diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 2cc3d7a..5baf0b3 100644
--- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -1638,14 +1638,14 @@ public void testCombinationInputFormatWithAcid() throws Exception {
assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000",
split.getPath().toString());
assertEquals(0, split.getStart());
- assertEquals(607, split.getLength());
+ assertEquals(625, split.getLength());
split = (HiveInputFormat.HiveInputSplit) splits[1];
assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
split.inputFormatClassName());
assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001",
split.getPath().toString());
assertEquals(0, split.getStart());
- assertEquals(629, split.getLength());
+ assertEquals(647, split.getLength());
CombineHiveInputFormat.CombineHiveInputSplit combineSplit =
(CombineHiveInputFormat.CombineHiveInputSplit) splits[2];
assertEquals(BUCKETS, combineSplit.getNumPaths());
@@ -1653,7 +1653,7 @@ public void testCombinationInputFormatWithAcid() throws Exception {
assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0",
combineSplit.getPath(bucket).toString());
assertEquals(0, combineSplit.getOffset(bucket));
- assertEquals(241, combineSplit.getLength(bucket));
+ assertEquals(253, combineSplit.getLength(bucket));
}
String[] hosts = combineSplit.getLocations();
assertEquals(2, hosts.length);
diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcTimezone1.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcTimezone1.java
new file mode 100644
index 0000000..70c2b0e
--- /dev/null
+++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcTimezone1.java
@@ -0,0 +1,428 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.orc;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertNotNull;
+
+import java.io.File;
+import java.sql.Timestamp;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hive.common.util.HiveTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Tests reading ORC timestamps when the writer and reader use different time zones.
+ */
+@RunWith(Parameterized.class)
+public class TestOrcTimezone1 {
+ Path workDir = new Path(System.getProperty("test.tmp.dir",
+ "target" + File.separator + "test" + File.separator + "tmp"));
+ Configuration conf;
+ FileSystem fs;
+ Path testFilePath;
+ String writerTimeZone;
+ String readerTimeZone;
+ static TimeZone defaultTimeZone = TimeZone.getDefault();
+
+ public TestOrcTimezone1(String writerTZ, String readerTZ) {
+ this.writerTimeZone = writerTZ;
+ this.readerTimeZone = readerTZ;
+ }
+
+ @Parameterized.Parameters
+ public static Collection