diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestTimestampTZ.java b/common/src/test/org/apache/hadoop/hive/common/type/TestTimestampTZ.java
new file mode 100644
index 0000000..83de30d
--- /dev/null
+++ b/common/src/test/org/apache/hadoop/hive/common/type/TestTimestampTZ.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common.type;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.TimeZone;
+import java.util.concurrent.ThreadLocalRandom;
+
+public class TestTimestampTZ {
+
+ private static TimeZone defaultTZ;
+ private static final String[] IDs = TimeZone.getAvailableIDs();
+
+ @BeforeClass
+ public static void storeDefaultTZ() {
+ defaultTZ = TimeZone.getDefault();
+ }
+
+ @Before
+ public void setTZ() {
+ int index = ThreadLocalRandom.current().nextInt(IDs.length);
+ TimeZone.setDefault(TimeZone.getTimeZone(IDs[index]));
+ }
+
+ @AfterClass
+ public static void restoreTZ() {
+ TimeZone.setDefault(defaultTZ);
+ }
+
+ @Test
+ public void testParse() {
+ String s1 = "2016-01-03 12:26:34.0123";
+ String s2 = s1 + " UTC";
+ Assert.assertEquals(s1 + " GMT", TimestampTZ.valueOf(s2).toString());
+ Assert.assertEquals(s1 + " GMT+08:00", TimestampTZ.valueOf(s1, "Asia/Shanghai").toString());
+ }
+
+ @Test
+ public void testHandleDST() {
+ // Same timezone can have different offset due to DST
+ String s1 = "2005-01-03 02:01:00";
+ Assert.assertEquals(s1 + ".0 GMT", TimestampTZ.valueOf(s1, "Europe/London").toString());
+ String s2 = "2005-06-03 02:01:00.30547";
+ Assert.assertEquals(s2 + " GMT+01:00", TimestampTZ.valueOf(s2, "Europe/London").toString());
+ // Can print time with DST properly
+ String s3 = "2005-04-03 02:01:00.04067";
+ Assert.assertEquals("2005-04-03 03:01:00.04067 GMT-07:00",
+ TimestampTZ.valueOf(s3, "America/Los_Angeles").toString());
+ }
+
+ @Test
+ public void testBadZoneID() {
+ try {
+ new TimestampTZ(0, "Foo id");
+ Assert.fail("Invalid timezone ID should cause exception");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ }
+}
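
The DST expectations in testHandleDST hinge on a quirk of java.util: on 2005-04-03 the US spring-forward skipped the 02:00-03:00 hour in America/Los_Angeles, so the 02:01 wall time in the test cannot exist and is resolved forward. A minimal standalone sketch of that JDK behavior (my own illustration, not part of the patch):

```java
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.TimeZone;

public class DstGapDemo {
  public static void main(String[] args) {
    // US DST began on 2005-04-03: wall clocks jumped from 02:00 to 03:00,
    // so 02:01 does not exist in America/Los_Angeles on that day.
    TimeZone la = TimeZone.getTimeZone("America/Los_Angeles");
    Calendar cal = new GregorianCalendar(la);   // lenient by default
    cal.clear();
    cal.set(2005, Calendar.APRIL, 3, 2, 1, 0);  // non-existent wall time
    SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
    fmt.setTimeZone(la);
    // The lenient calendar resolves the gap forward to daylight time:
    System.out.println(fmt.format(cal.getTime())); // 2005-04-03 03:01:00 PDT
  }
}
```

This matches the assertion above that "2005-04-03 02:01:00.04067" comes back as "2005-04-03 03:01:00.04067 GMT-07:00".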
diff --git a/contrib/src/test/queries/clientnegative/serde_regex.q b/contrib/src/test/queries/clientnegative/serde_regex.q
index a676338..9d27768 100644
--- a/contrib/src/test/queries/clientnegative/serde_regex.q
+++ b/contrib/src/test/queries/clientnegative/serde_regex.q
@@ -8,7 +8,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status INT,
size INT,
@@ -25,7 +25,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status INT,
size INT,
diff --git a/contrib/src/test/queries/clientpositive/serde_regex.q b/contrib/src/test/queries/clientpositive/serde_regex.q
index d75d607..8aa3eda 100644
--- a/contrib/src/test/queries/clientpositive/serde_regex.q
+++ b/contrib/src/test/queries/clientpositive/serde_regex.q
@@ -6,7 +6,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -23,7 +23,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -39,4 +39,4 @@ STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH "../../data/files/apache.access.log" INTO TABLE serde_regex;
LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" INTO TABLE serde_regex;
-SELECT * FROM serde_regex ORDER BY time;
\ No newline at end of file
+SELECT * FROM serde_regex ORDER BY `time`;
\ No newline at end of file
diff --git a/contrib/src/test/results/clientnegative/serde_regex.q.out b/contrib/src/test/results/clientnegative/serde_regex.q.out
index 0f9b036..df6918d 100644
--- a/contrib/src/test/results/clientnegative/serde_regex.q.out
+++ b/contrib/src/test/results/clientnegative/serde_regex.q.out
@@ -10,7 +10,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status INT,
size INT,
@@ -29,7 +29,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status INT,
size INT,
@@ -62,7 +62,7 @@ PREHOOK: query: CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status INT,
size INT,
diff --git a/contrib/src/test/results/clientpositive/serde_regex.q.out b/contrib/src/test/results/clientpositive/serde_regex.q.out
index 2984293..1ce89e1 100644
--- a/contrib/src/test/results/clientpositive/serde_regex.q.out
+++ b/contrib/src/test/results/clientpositive/serde_regex.q.out
@@ -3,7 +3,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -21,7 +21,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -54,7 +54,7 @@ PREHOOK: query: CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -73,7 +73,7 @@ POSTHOOK: query: CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -104,11 +104,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" I
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@serde_regex
-PREHOOK: query: SELECT * FROM serde_regex ORDER BY time
+PREHOOK: query: SELECT * FROM serde_regex ORDER BY `time`
PREHOOK: type: QUERY
PREHOOK: Input: default@serde_regex
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM serde_regex ORDER BY time
+POSTHOOK: query: SELECT * FROM serde_regex ORDER BY `time`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@serde_regex
#### A masked pattern was here ####
diff --git a/hbase-handler/src/test/queries/positive/hbase_timestamp.q b/hbase-handler/src/test/queries/positive/hbase_timestamp.q
index 0350afe..6ae2c30 100644
--- a/hbase-handler/src/test/queries/positive/hbase_timestamp.q
+++ b/hbase-handler/src/test/queries/positive/hbase_timestamp.q
@@ -1,5 +1,5 @@
DROP TABLE hbase_table;
-CREATE TABLE hbase_table (key string, value string, time timestamp)
+CREATE TABLE hbase_table (key string, value string, `time` timestamp)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
DESC extended hbase_table;
@@ -7,14 +7,14 @@ FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, "2012-02-23 10:14
SELECT * FROM hbase_table;
DROP TABLE hbase_table;
-CREATE TABLE hbase_table (key string, value string, time bigint)
+CREATE TABLE hbase_table (key string, value string, `time` bigint)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754000 WHERE (key % 17) = 0;
-SELECT key, value, cast(time as timestamp) FROM hbase_table;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table;
DROP TABLE hbase_table;
-CREATE TABLE hbase_table (key string, value string, time bigint)
+CREATE TABLE hbase_table (key string, value string, `time` bigint)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
insert overwrite table hbase_table select key,value,ts FROM
@@ -25,23 +25,23 @@ insert overwrite table hbase_table select key,value,ts FROM
) T;
explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` < 200000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` < 200000000000;
explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` > 100000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` > 100000000000;
explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` <= 100000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` <= 100000000000;
explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` >= 200000000000;
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` >= 200000000000;
DROP TABLE hbase_table;
-CREATE TABLE hbase_table(key string, value map<string,string>, time timestamp)
+CREATE TABLE hbase_table(key string, value map<string,string>, `time` timestamp)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:,:timestamp");
FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, MAP("name", CONCAT(value, " Jr")), "2012-02-23 10:14:52" WHERE (key % 17) = 0;
diff --git a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
index 3918121..e719b08 100644
--- a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
@@ -2,13 +2,13 @@ PREHOOK: query: DROP TABLE hbase_table
PREHOOK: type: DROPTABLE
POSTHOOK: query: DROP TABLE hbase_table
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table (key string, value string, time timestamp)
+PREHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` timestamp)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, time timestamp)
+POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` timestamp)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
POSTHOOK: type: CREATETABLE
@@ -69,13 +69,13 @@ POSTHOOK: query: DROP TABLE hbase_table
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@hbase_table
POSTHOOK: Output: default@hbase_table
-PREHOOK: query: CREATE TABLE hbase_table (key string, value string, time bigint)
+PREHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, time bigint)
+POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
POSTHOOK: type: CREATETABLE
@@ -89,11 +89,11 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value,
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@hbase_table
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table
+PREHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table
PREHOOK: type: QUERY
PREHOOK: Input: default@hbase_table
#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table
+POSTHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table
POSTHOOK: type: QUERY
POSTHOOK: Input: default@hbase_table
#### A masked pattern was here ####
@@ -125,13 +125,13 @@ POSTHOOK: query: DROP TABLE hbase_table
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@hbase_table
POSTHOOK: Output: default@hbase_table
-PREHOOK: query: CREATE TABLE hbase_table (key string, value string, time bigint)
+PREHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, time bigint)
+POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
POSTHOOK: type: CREATETABLE
@@ -156,10 +156,10 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@hbase_table
PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` < 200000000000
PREHOOK: type: QUERY
POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` < 200000000000
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -193,21 +193,21 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+PREHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` < 200000000000
PREHOOK: type: QUERY
PREHOOK: Input: default@hbase_table
#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+POSTHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` < 200000000000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@hbase_table
#### A masked pattern was here ####
165 val_165 1973-03-03 01:46:40
396 val_396 1973-03-03 01:46:40
PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` > 100000000000
PREHOOK: type: QUERY
POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` > 100000000000
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -241,11 +241,11 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+PREHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` > 100000000000
PREHOOK: type: QUERY
PREHOOK: Input: default@hbase_table
#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+POSTHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` > 100000000000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@hbase_table
#### A masked pattern was here ####
@@ -254,10 +254,10 @@ POSTHOOK: Input: default@hbase_table
296 val_296 1976-05-03 12:33:20
333 val_333 1976-05-03 12:33:20
PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` <= 100000000000
PREHOOK: type: QUERY
POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` <= 100000000000
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -291,21 +291,21 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+PREHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` <= 100000000000
PREHOOK: type: QUERY
PREHOOK: Input: default@hbase_table
#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+POSTHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` <= 100000000000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@hbase_table
#### A masked pattern was here ####
165 val_165 1973-03-03 01:46:40
396 val_396 1973-03-03 01:46:40
PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` >= 200000000000
PREHOOK: type: QUERY
POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` >= 200000000000
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -339,11 +339,11 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+PREHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` >= 200000000000
PREHOOK: type: QUERY
PREHOOK: Input: default@hbase_table
#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+POSTHOOK: query: SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` >= 200000000000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@hbase_table
#### A masked pattern was here ####
@@ -359,13 +359,13 @@ POSTHOOK: query: DROP TABLE hbase_table
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@hbase_table
POSTHOOK: Output: default@hbase_table
-PREHOOK: query: CREATE TABLE hbase_table(key string, value map<string,string>, time timestamp)
+PREHOOK: query: CREATE TABLE hbase_table(key string, value map<string,string>, `time` timestamp)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:,:timestamp")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table(key string, value map<string,string>, time timestamp)
+POSTHOOK: query: CREATE TABLE hbase_table(key string, value map<string,string>, `time` timestamp)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:,:timestamp")
POSTHOOK: type: CREATETABLE
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
index 93f093f..69f8277 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.serde2.thrift.Type;
import org.apache.hive.service.cli.TableSchema;
@@ -441,6 +442,8 @@ private Object evaluate(Type type, Object value) {
return value;
case TIMESTAMP_TYPE:
return Timestamp.valueOf((String) value);
+ case TIMESTAMPTZ_TYPE:
+ return TimestampTZ.valueOf((String) value);
case DECIMAL_TYPE:
return new BigDecimal((String)value);
case DATE_TYPE:
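
With the TIMESTAMPTZ_TYPE branch above, a client should now get TimestampTZ objects back from getObject rather than plain strings. A hedged sketch of what that looks like from the JDBC side (connection URL and query are illustrative, not from the patch):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TimestampTZJdbcDemo {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
             "jdbc:hive2://localhost:10000/default");   // illustrative URL
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT cast('2016-01-03 12:26:34.0123 GMT' as timestamptz)")) {
      while (rs.next()) {
        Object v = rs.getObject(1);
        // Expected to be org.apache.hadoop.hive.common.type.TimestampTZ,
        // built via TimestampTZ.valueOf on the server's string rendering.
        System.out.println(v.getClass().getName());
        System.out.println(v); // e.g. 2016-01-03 12:26:34.0123 GMT
      }
    }
  }
}
```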
diff --git a/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java b/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java
index 38918f0..27abd9a 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.thrift.Type;
@@ -105,6 +106,8 @@ static String columnClassName(Type hiveType, JdbcColumnAttributes columnAttribut
return HiveIntervalYearMonth.class.getName();
case INTERVAL_DAY_TIME_TYPE:
return HiveIntervalDayTime.class.getName();
+ case TIMESTAMPTZ_TYPE:
+ return TimestampTZ.class.getName();
default:
return String.class.getName();
}
@@ -142,6 +145,8 @@ static Type typeStringToHiveType(String type) throws SQLException {
return Type.DATE_TYPE;
} else if ("timestamp".equalsIgnoreCase(type)) {
return Type.TIMESTAMP_TYPE;
+ } else if (serdeConstants.TIMESTAMPTZ_TYPE_NAME.equalsIgnoreCase(type)) {
+ return Type.TIMESTAMPTZ_TYPE;
} else if ("interval_year_month".equalsIgnoreCase(type)) {
return Type.INTERVAL_YEAR_MONTH_TYPE;
} else if ("interval_day_time".equalsIgnoreCase(type)) {
@@ -195,6 +200,8 @@ static String getColumnTypeName(String type) throws SQLException {
return serdeConstants.BIGINT_TYPE_NAME;
} else if ("timestamp".equalsIgnoreCase(type)) {
return serdeConstants.TIMESTAMP_TYPE_NAME;
+ } else if (serdeConstants.TIMESTAMPTZ_TYPE_NAME.equalsIgnoreCase(type)) {
+ return serdeConstants.TIMESTAMPTZ_TYPE_NAME;
} else if ("date".equalsIgnoreCase(type)) {
return serdeConstants.DATE_TYPE_NAME;
} else if ("interval_year_month".equalsIgnoreCase(type)) {
@@ -305,6 +312,8 @@ static int columnPrecision(Type hiveType, JdbcColumnAttributes columnAttributes)
case INTERVAL_DAY_TIME_TYPE:
// -ddddddddd hh:mm:ss.nnnnnnnnn
return 29;
+ case TIMESTAMPTZ_TYPE:
+ return 39;
default:
return Integer.MAX_VALUE;
}
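
The new precision of 39 for TIMESTAMPTZ_TYPE presumably counts the widest display form, by analogy with the commented 29 for INTERVAL_DAY_TIME_TYPE just above: 29 characters for a nanosecond-precision timestamp plus a space and a GMT offset suffix. A quick check (illustration only):

```java
// "yyyy-MM-dd HH:mm:ss.nnnnnnnnn GMT+hh:mm" -> 29 + 1 + 9 = 39 characters
String widest = "2016-01-03 12:26:34.123456789 GMT+08:00";
System.out.println(widest.length()); // 39
```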
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index de74c3e..eb29759 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -385,6 +385,7 @@
system.registerGenericUDF(serdeConstants.DATE_TYPE_NAME, GenericUDFToDate.class);
system.registerGenericUDF(serdeConstants.TIMESTAMP_TYPE_NAME, GenericUDFTimestamp.class);
+ system.registerGenericUDF(serdeConstants.TIMESTAMPTZ_TYPE_NAME, GenericUDFToTimestampTZ.class);
system.registerGenericUDF(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME, GenericUDFToIntervalYearMonth.class);
system.registerGenericUDF(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME, GenericUDFToIntervalDayTime.class);
system.registerGenericUDF(serdeConstants.BINARY_TYPE_NAME, GenericUDFToBinary.class);
@@ -1472,7 +1473,8 @@ public static boolean isOpCast(GenericUDF genericUDF) {
udfClass == UDFToShort.class || udfClass == UDFToString.class ||
udfClass == GenericUDFToVarchar.class || udfClass == GenericUDFToChar.class ||
udfClass == GenericUDFTimestamp.class || udfClass == GenericUDFToBinary.class ||
- udfClass == GenericUDFToDate.class || udfClass == GenericUDFToDecimal.class;
+ udfClass == GenericUDFToDate.class || udfClass == GenericUDFToDecimal.class ||
+ udfClass == GenericUDFToTimestampTZ.class;
}
/**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index f28d33e..e56fab1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -452,6 +452,8 @@ private int getSize(int pos, PrimitiveCategory category) {
return javaObjectOverHead;
case TIMESTAMP:
return javaObjectOverHead + javaSizePrimitiveType;
+ case TIMESTAMPTZ:
+ return javaObjectOverHead + 2 * javaSizePrimitiveType;
default:
return javaSizeUnknownType;
}
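
The in-memory size estimate above charges TIMESTAMPTZ one more primitive slot than TIMESTAMP, which fits a representation that stores a zone offset alongside the epoch-time state. Roughly (placeholder constants for illustration, not Hive's actual values):

```java
int javaObjectOverHead = 64;    // placeholder object-header estimate
int javaSizePrimitiveType = 16; // placeholder primitive estimate
int timestampSize   = javaObjectOverHead + javaSizePrimitiveType;     // 80
int timestampTZSize = javaObjectOverHead + 2 * javaSizePrimitiveType; // 96
```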
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
index 7be628e..c44d778 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
@@ -37,8 +37,8 @@
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.tez.TezJobMonitor;
import org.apache.hadoop.hive.ql.exec.vector.VectorFileSinkOperator;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
@@ -222,6 +222,7 @@ public Kryo create() {
KryoWithHooks kryo = new KryoWithHooks();
kryo.register(java.sql.Date.class, new SqlDateSerializer());
kryo.register(java.sql.Timestamp.class, new TimestampSerializer());
+ kryo.register(TimestampTZ.class, new TimestampTZSerializer());
kryo.register(Path.class, new PathSerializer());
kryo.register(Arrays.asList("").getClass(), new ArraysAsListSerializer());
@@ -306,6 +307,30 @@ public void write(Kryo kryo, Output output, Timestamp ts) {
}
/**
+ * Kryo serializer for timestamp with time zone.
+ */
+ private static class TimestampTZSerializer extends
+ com.esotericsoftware.kryo.Serializer<TimestampTZ> {
+
+ @Override
+ public void write(Kryo kryo, Output output, TimestampTZ timestampTZ) {
+ output.writeLong(timestampTZ.getTime());
+ output.writeInt(timestampTZ.getNanos());
+ output.writeInt(timestampTZ.getOffsetInMin());
+ }
+
+ @Override
+ public TimestampTZ read(Kryo kryo, Input input, Class<TimestampTZ> aClass) {
+ long time = input.readLong();
+ int nanos = input.readInt();
+ int offset = input.readInt();
+ TimestampTZ tstz = new TimestampTZ(time, offset);
+ tstz.setNanos(nanos);
+ return tstz;
+ }
+ }
+
+ /**
* Custom Kryo serializer for sql date, otherwise Kryo gets confused between
* java.sql.Date and java.util.Date while deserializing
*/
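
The serializer's wire format is three fixed-width fields. A self-contained sketch of the same byte-level contract using Kryo's streams directly (values are illustrative; assumes the TimestampTZ accessors used in write()/read() above):

```java
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

public class TimestampTZWireDemo {
  public static void main(String[] args) {
    // Mirror TimestampTZSerializer.write: long time, int nanos, int offsetInMin.
    Output out = new Output(64);
    out.writeLong(1452342394012L); // epoch millis
    out.writeInt(12300000);        // nanos
    out.writeInt(480);             // GMT+08:00, expressed in minutes
    out.flush();

    // Mirror TimestampTZSerializer.read, in the same field order.
    Input in = new Input(out.toBytes());
    System.out.printf("time=%d nanos=%d offsetInMin=%d%n",
        in.readLong(), in.readInt(), in.readInt());
  }
}
```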
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
index ba41518..6a1fdd1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
@@ -176,6 +176,9 @@ public static RelDataType convert(PrimitiveTypeInfo type, RelDataTypeFactory dtF
case TIMESTAMP:
convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP);
break;
+ case TIMESTAMPTZ:
+ convertedType = dtFactory.createSqlType(SqlTypeName.OTHER);
+ break;
case INTERVAL_YEAR_MONTH:
convertedType = dtFactory.createSqlIntervalType(
new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1,1)));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 8b0db4a..0c30bd5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -200,6 +200,7 @@
TokenToTypeName.put(HiveParser.TOK_DATE, serdeConstants.DATE_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_DATETIME, serdeConstants.DATETIME_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME);
+ TokenToTypeName.put(HiveParser.TOK_TIMESTAMPTZ, serdeConstants.TIMESTAMPTZ_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_INTERVAL_YEAR_MONTH, serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_INTERVAL_DAY_TIME, serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 7ceb005..78c5fdf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -128,6 +128,9 @@ KW_PRECISION: 'PRECISION';
KW_DATE: 'DATE';
KW_DATETIME: 'DATETIME';
KW_TIMESTAMP: 'TIMESTAMP';
+KW_TIMESTAMPTZ: 'TIMESTAMPTZ';
+KW_TIME: 'TIME';
+KW_ZONE: 'ZONE';
KW_INTERVAL: 'INTERVAL';
KW_DECIMAL: 'DECIMAL';
KW_STRING: 'STRING';
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 62bbcc6..9fa17c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -120,6 +120,7 @@ TOK_DATELITERAL;
TOK_DATETIME;
TOK_TIMESTAMP;
TOK_TIMESTAMPLITERAL;
+TOK_TIMESTAMPTZ;
TOK_INTERVAL_YEAR_MONTH;
TOK_INTERVAL_YEAR_MONTH_LITERAL;
TOK_INTERVAL_DAY_TIME;
@@ -472,6 +473,9 @@ import org.apache.hadoop.hive.conf.HiveConf;
xlateMap.put("KW_DATE", "DATE");
xlateMap.put("KW_DATETIME", "DATETIME");
xlateMap.put("KW_TIMESTAMP", "TIMESTAMP");
+ xlateMap.put("KW_TIMESTAMPTZ", "TIMESTAMPTZ");
+ xlateMap.put("KW_TIME", "TIME");
+ xlateMap.put("KW_ZONE", "ZONE");
xlateMap.put("KW_STRING", "STRING");
xlateMap.put("KW_BINARY", "BINARY");
xlateMap.put("KW_ARRAY", "ARRAY");
@@ -2266,6 +2270,8 @@ primitiveType
| KW_DATE -> TOK_DATE
| KW_DATETIME -> TOK_DATETIME
| KW_TIMESTAMP -> TOK_TIMESTAMP
+ | KW_TIMESTAMPTZ -> TOK_TIMESTAMPTZ
+ | KW_TIMESTAMP KW_WITH KW_TIME KW_ZONE -> TOK_TIMESTAMPTZ
// Uncomment to allow intervals as table column types
//| KW_INTERVAL KW_YEAR KW_TO KW_MONTH -> TOK_INTERVAL_YEAR_MONTH
//| KW_INTERVAL KW_DAY KW_TO KW_SECOND -> TOK_INTERVAL_DAY_TIME
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 9ba1865..44384af 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -673,6 +673,8 @@ nonReserved
| KW_VALIDATE
| KW_NOVALIDATE
| KW_KEY
+ | KW_ZONE
+ | KW_TIMESTAMPTZ
;
//The following SQL2011 reserved keywords are used as cast function name only, but not as identifiers.
@@ -701,4 +703,5 @@ sql11ReservedKeywordsUsedAsIdentifier
| KW_CONSTRAINT
| KW_REFERENCES
| KW_PRECISION
+ | KW_TIME
;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 82080eb..e92b2e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -732,6 +732,8 @@ public ColumnExprProcessor getColumnExprProcessor() {
serdeConstants.DATE_TYPE_NAME);
conversionFunctionTextHashMap.put(HiveParser.TOK_TIMESTAMP,
serdeConstants.TIMESTAMP_TYPE_NAME);
+ conversionFunctionTextHashMap.put(HiveParser.TOK_TIMESTAMPTZ,
+ serdeConstants.TIMESTAMPTZ_TYPE_NAME);
conversionFunctionTextHashMap.put(HiveParser.TOK_INTERVAL_YEAR_MONTH,
serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME);
conversionFunctionTextHashMap.put(HiveParser.TOK_INTERVAL_DAY_TIME,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index a718264..c4037b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -94,6 +94,7 @@
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableDoubleObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableFloatObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableIntObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableShortObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampTZObjectInspector;
@@ -737,6 +738,8 @@ public static ColStatistics getColStatistics(ColumnStatisticsObj cso, String tab
cs.setNumNulls(csd.getBinaryStats().getNumNulls());
} else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
cs.setAvgColLen(JavaDataModel.get().lengthOfTimestamp());
+ } else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMPTZ_TYPE_NAME)) {
+ cs.setAvgColLen(JavaDataModel.get().lengthOfTimestampTZ());
} else if (colTypeLowerCase.startsWith(serdeConstants.DECIMAL_TYPE_NAME)) {
cs.setAvgColLen(JavaDataModel.get().lengthOfDecimal());
cs.setCountDistint(csd.getDecimalStats().getNumDVs());
@@ -1032,6 +1035,8 @@ public static long getAvgColLenOfFixedLengthTypes(String colType) {
return JavaDataModel.get().primitive2();
} else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
return JavaDataModel.get().lengthOfTimestamp();
+ } else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMPTZ_TYPE_NAME)) {
+ return JavaDataModel.get().lengthOfTimestampTZ();
} else if (colTypeLowerCase.equals(serdeConstants.DATE_TYPE_NAME)) {
return JavaDataModel.get().lengthOfDate();
} else if (colTypeLowerCase.startsWith(serdeConstants.DECIMAL_TYPE_NAME)) {
@@ -1069,6 +1074,8 @@ public static long getSizeOfPrimitiveTypeArraysFromType(String colType, int leng
return JavaDataModel.get().lengthForBooleanArrayOfSize(length);
} else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
return JavaDataModel.get().lengthForTimestampArrayOfSize(length);
+ } else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMPTZ_TYPE_NAME)) {
+ return JavaDataModel.get().lengthForTimestampTZArrayOfSize(length);
} else if (colTypeLowerCase.equals(serdeConstants.DATE_TYPE_NAME)) {
return JavaDataModel.get().lengthForDateArrayOfSize(length);
} else if (colTypeLowerCase.startsWith(serdeConstants.DECIMAL_TYPE_NAME)) {
@@ -1154,6 +1161,8 @@ public static long getWritableSize(ObjectInspector oi, Object value) {
return JavaDataModel.get().primitive1();
} else if (oi instanceof WritableTimestampObjectInspector) {
return JavaDataModel.get().lengthOfTimestamp();
+ } else if (oi instanceof WritableTimestampTZObjectInspector) {
+ return JavaDataModel.get().lengthOfTimestampTZ();
}
return 0;
@@ -1500,6 +1509,8 @@ public static long getDataSizeFromColumnStats(long numRows, List<ColStatistics>
sizeOf = JavaDataModel.get().lengthForByteArrayOfSize(acl);
} else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
sizeOf = JavaDataModel.get().lengthOfTimestamp();
+ } else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMPTZ_TYPE_NAME)) {
+ sizeOf = JavaDataModel.get().lengthOfTimestampTZ();
} else if (colTypeLowerCase.startsWith(serdeConstants.DECIMAL_TYPE_NAME)) {
sizeOf = JavaDataModel.get().lengthOfDecimal();
} else if (colTypeLowerCase.equals(serdeConstants.DATE_TYPE_NAME)) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToBoolean.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToBoolean.java
index 17b892c..89f9055 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToBoolean.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToBoolean.java
@@ -31,8 +31,10 @@
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
@@ -182,6 +184,14 @@ public BooleanWritable evaluate(DateWritable d) {
}
public BooleanWritable evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public BooleanWritable evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
+ private BooleanWritable evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java
index efae82d..403399f 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java
@@ -26,8 +26,10 @@
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazy.LazyByte;
import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
import org.apache.hadoop.io.BooleanWritable;
@@ -183,10 +185,18 @@ public ByteWritable evaluate(Text i) {
}
public ByteWritable evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public ByteWritable evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
+ private ByteWritable evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
- byteWritable.set((byte)i.getSeconds());
+ byteWritable.set((byte) i.getSeconds());
return byteWritable;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
index 9cbc114..bb5f5ea 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
@@ -26,8 +26,10 @@
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.FloatWritable;
@@ -180,6 +182,14 @@ public DoubleWritable evaluate(Text i) {
}
public DoubleWritable evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public DoubleWritable evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
+ private DoubleWritable evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
index 5808c90..1b9c217 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
@@ -26,8 +26,10 @@
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.FloatWritable;
@@ -181,6 +183,14 @@ public FloatWritable evaluate(Text i) {
}
public FloatWritable evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public FloatWritable evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
+ private FloatWritable evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
index a7551cb..bddf687 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
@@ -27,8 +27,10 @@
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
import org.apache.hadoop.io.BooleanWritable;
@@ -184,6 +186,14 @@ public IntWritable evaluate(Text i) {
}
}
+ public IntWritable evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public IntWritable evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
/**
* Convert from Timestamp to an integer. This is called for CAST(... AS INT)
*
@@ -191,7 +201,7 @@ public IntWritable evaluate(Text i) {
* The Timestamp value to convert
* @return IntWritable
*/
- public IntWritable evaluate(TimestampWritable i) {
+ private IntWritable evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
index c961d14..27d3df1 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
@@ -26,8 +26,10 @@
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazy.LazyLong;
import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
import org.apache.hadoop.io.BooleanWritable;
@@ -195,6 +197,14 @@ public LongWritable evaluate(Text i) {
}
public LongWritable evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public LongWritable evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
+ private LongWritable evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java
index 570408a..1004df4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java
@@ -20,15 +20,16 @@
import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToDouble;
import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToLong;
import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastDoubleToLong;
import org.apache.hadoop.hive.ql.exec.vector.expressions.CastTimestampToLong;
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazy.LazyShort;
import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
import org.apache.hadoop.io.BooleanWritable;
@@ -185,6 +186,14 @@ public ShortWritable evaluate(Text i) {
}
public ShortWritable evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public ShortWritable evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
+ private ShortWritable evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java
index 5cacd59..81ca965 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToString.java
@@ -24,8 +24,10 @@
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
import org.apache.hadoop.hive.serde2.lazy.LazyLong;
import org.apache.hadoop.io.BooleanWritable;
@@ -144,6 +146,14 @@ public Text evaluate(DateWritable d) {
}
public Text evaluate(TimestampWritable i) {
+ return evalTS(i);
+ }
+
+ public Text evaluate(TimestampTZWritable i) {
+ return evalTS(i);
+ }
+
+ private Text evalTS(TimestampWritableBase i) {
if (i == null) {
return null;
} else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
index 259fde8..4867955 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
@@ -403,6 +403,7 @@ protected void obtainDateConverter(ObjectInspector[] arguments, int i,
case TIMESTAMP:
case DATE:
case VOID:
+ case TIMESTAMPTZ:
outOi = PrimitiveObjectInspectorFactory.writableDateObjectInspector;
break;
default:
@@ -499,6 +500,7 @@ protected Date getDateValue(DeferredObject[] arguments, int i, PrimitiveCategory
break;
case TIMESTAMP:
case DATE:
+ case TIMESTAMPTZ:
Object writableValue = converters[i].convert(obj);
date = ((DateWritable) writableValue).get();
break;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToTimestampTZ.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToTimestampTZ.java
new file mode 100644
index 0000000..e41a024
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToTimestampTZ.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter.TimestampTZConverter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+
+/**
+ * Convert from string to TIMESTAMP WITH TIME ZONE.
+ */
+@Description(name = "timestamp with time zone",
+ value = "CAST(STRING as TIMESTAMP WITH TIME ZONE) - returns the" +
+ "timestamp with time zone represented by string.",
+ extended = "The string should be of format 'yyyy-MM-dd HH:mm:ss.[fff...] TimezoneID'. " +
+ "TimezoneID needs to be understood by java.util.TimeZone.")
+public class GenericUDFToTimestampTZ extends GenericUDF {
+
+ private transient PrimitiveObjectInspector argumentOI;
+ private transient TimestampTZConverter converter;
+
+ @Override
+ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+ if (arguments.length < 1) {
+ throw new UDFArgumentLengthException(
+ "The function CAST as TIMESTAMP WITH TIME ZONE requires at least one argument, got "
+ + arguments.length);
+ }
+ try {
+ argumentOI = (PrimitiveObjectInspector) arguments[0];
+ switch (argumentOI.getPrimitiveCategory()) {
+ case CHAR:
+ case VARCHAR:
+ case STRING:
+ case TIMESTAMPTZ:
+ break;
+ default:
+ throw new UDFArgumentException("CAST as TIMESTAMP WITH TIME ZONE only allows " +
+ "string or timestamp with time zone types");
+ }
+ } catch (ClassCastException e) {
+ throw new UDFArgumentException(
+ "The function CAST as TIMESTAMP WITH TIME ZONE takes only primitive types");
+ }
+ converter = new TimestampTZConverter(argumentOI,
+ PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector);
+ return PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector;
+ }
+
+ @Override
+ public Object evaluate(DeferredObject[] arguments) throws HiveException {
+ Object o0 = arguments[0].get();
+ if (o0 == null) {
+ return null;
+ }
+ return converter.convert(o0);
+ }
+
+ @Override
+ public String getDisplayString(String[] children) {
+ assert (children.length == 1);
+ return "CAST(" + children[0] + " AS TIMESTAMP WITH TIME ZONE)";
+ }
+}
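
For completeness, a hedged sketch of exercising the new UDF directly, following the usual GenericUDF test pattern (the input literal is illustrative; assumes the demo sits in this class's package and relies on the writable's string rendering shown in TestTimestampTZ):

```java
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class ToTimestampTZDemo {
  public static void main(String[] args) throws Exception {
    GenericUDFToTimestampTZ udf = new GenericUDFToTimestampTZ();
    ObjectInspector[] argOIs =
        { PrimitiveObjectInspectorFactory.javaStringObjectInspector };
    udf.initialize(argOIs); // returns the writable TimestampTZ inspector
    DeferredObject[] input =
        { new DeferredJavaObject("2016-01-03 12:26:34.0123 Asia/Shanghai") };
    // Expect a TimestampTZWritable whose toString normalizes the zone,
    // e.g. "2016-01-03 12:26:34.0123 GMT+08:00" (cf. TestTimestampTZ).
    System.out.println(udf.evaluate(input));
  }
}
```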
diff --git a/ql/src/test/queries/clientnegative/serde_regex.q b/ql/src/test/queries/clientnegative/serde_regex.q
index c9cfc7d..9a1776a 100644
--- a/ql/src/test/queries/clientnegative/serde_regex.q
+++ b/ql/src/test/queries/clientnegative/serde_regex.q
@@ -4,7 +4,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time TIMESTAMP,
+ `time` TIMESTAMP,
request STRING,
status INT,
size INT,
diff --git a/ql/src/test/queries/clientnegative/serde_regex2.q b/ql/src/test/queries/clientnegative/serde_regex2.q
index a29bb9c..12e802e 100644
--- a/ql/src/test/queries/clientnegative/serde_regex2.q
+++ b/ql/src/test/queries/clientnegative/serde_regex2.q
@@ -5,7 +5,7 @@ USE default;
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -21,4 +21,4 @@ LOAD DATA LOCAL INPATH "../../data/files/apache.access.log" INTO TABLE serde_reg
LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" INTO TABLE serde_regex;
-- raise an exception
-SELECT * FROM serde_regex ORDER BY time;
\ No newline at end of file
+SELECT * FROM serde_regex ORDER BY `time`;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientnegative/serde_regex3.q b/ql/src/test/queries/clientnegative/serde_regex3.q
index 4e91f06..b7810b5 100644
--- a/ql/src/test/queries/clientnegative/serde_regex3.q
+++ b/ql/src/test/queries/clientnegative/serde_regex3.q
@@ -4,7 +4,7 @@ USE default;
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
diff --git a/ql/src/test/queries/clientpositive/create_like.q b/ql/src/test/queries/clientpositive/create_like.q
index bd39731..81172f3 100644
--- a/ql/src/test/queries/clientpositive/create_like.q
+++ b/ql/src/test/queries/clientpositive/create_like.q
@@ -84,7 +84,7 @@ DESCRIBE FORMATTED table6;
drop table table5;
create table orc_table (
-time string)
+`time` string)
stored as ORC tblproperties ("orc.compress"="SNAPPY");
create table orc_table_using_like like orc_table;
diff --git a/ql/src/test/queries/clientpositive/join43.q b/ql/src/test/queries/clientpositive/join43.q
index 12c45a6..b2e10dc 100644
--- a/ql/src/test/queries/clientpositive/join43.q
+++ b/ql/src/test/queries/clientpositive/join43.q
@@ -1,11 +1,11 @@
set hive.mapred.mode=nonstrict;
-create table purchase_history (s string, product string, price double, time int);
+create table purchase_history (s string, product string, price double, `time` int);
insert into purchase_history values ('1', 'Belt', 20.00, 21);
insert into purchase_history values ('1', 'Socks', 3.50, 31);
insert into purchase_history values ('3', 'Belt', 20.00, 51);
insert into purchase_history values ('4', 'Shirt', 15.50, 59);
-create table cart_history (s string, cart_id int, time int);
+create table cart_history (s string, cart_id int, `time` int);
insert into cart_history values ('1', 1, 10);
insert into cart_history values ('1', 2, 20);
insert into cart_history values ('1', 3, 30);
@@ -13,7 +13,7 @@ insert into cart_history values ('1', 4, 40);
insert into cart_history values ('3', 5, 50);
insert into cart_history values ('4', 6, 60);
-create table events (s string, st2 string, n int, time int);
+create table events (s string, st2 string, n int, `time` int);
insert into events values ('1', 'Bob', 1234, 20);
insert into events values ('1', 'Bob', 1234, 30);
insert into events values ('1', 'Bob', 1234, 25);
@@ -26,30 +26,30 @@ select s
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list;
select s
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list;
explain
@@ -57,28 +57,28 @@ select *
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list;
select *
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list;
diff --git a/ql/src/test/queries/clientpositive/serde_regex.q b/ql/src/test/queries/clientpositive/serde_regex.q
index e21c6e1..fc716ed 100644
--- a/ql/src/test/queries/clientpositive/serde_regex.q
+++ b/ql/src/test/queries/clientpositive/serde_regex.q
@@ -4,7 +4,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size INT,
@@ -20,7 +20,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size INT,
@@ -35,9 +35,9 @@ STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH "../../data/files/apache.access.log" INTO TABLE serde_regex;
LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" INTO TABLE serde_regex;
-SELECT * FROM serde_regex ORDER BY time;
+SELECT * FROM serde_regex ORDER BY `time`;
-SELECT host, size, status, time from serde_regex ORDER BY time;
+SELECT host, size, status, `time` from serde_regex ORDER BY `time`;
DROP TABLE serde_regex;
diff --git a/ql/src/test/queries/clientpositive/timestamptz.q b/ql/src/test/queries/clientpositive/timestamptz.q
new file mode 100644
index 0000000..3229b93
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/timestamptz.q
@@ -0,0 +1,5 @@
+explain select cast('2005-01-03 02:01:00 GMT' as timestamp with time zone);
+select cast('2005-01-03 02:01:00 GMT' as timestamp with time zone);
+
+explain select cast('2016-01-03 12:26:34.0123 America/Los_Angeles' as timestamptz);
+select cast('2016-01-03 12:26:34.0123 America/Los_Angeles' as timestamptz);
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/timestamptz_1.q b/ql/src/test/queries/clientpositive/timestamptz_1.q
new file mode 100644
index 0000000..b3215ba
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/timestamptz_1.q
@@ -0,0 +1,37 @@
+set hive.fetch.task.conversion=more;
+
+drop table tstz1;
+
+create table tstz1(t timestamp with time zone);
+
+insert overwrite table tstz1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with time zone);
+select cast(t as boolean) from tstz1;
+select cast(t as tinyint) from tstz1;
+select cast(t as smallint) from tstz1;
+select cast(t as int) from tstz1;
+select cast(t as bigint) from tstz1;
+select cast(t as string) from tstz1;
+
+insert overwrite table tstz1 select '2016-01-03 12:26:34.1 America/Los_Angeles';
+select cast(t as boolean) from tstz1;
+select cast(t as tinyint) from tstz1;
+select cast(t as smallint) from tstz1;
+select cast(t as int) from tstz1;
+select cast(t as bigint) from tstz1;
+select cast(t as string) from tstz1;
+
+insert overwrite table tstz1 select '2016-01-03 12:26:34.0123 America/Los_Angeles';
+select cast(t as boolean) from tstz1;
+select cast(t as tinyint) from tstz1;
+select cast(t as smallint) from tstz1;
+select cast(t as int) from tstz1;
+select cast(t as bigint) from tstz1;
+select cast(t as string) from tstz1;
+
+insert overwrite table tstz1 select '2016-01-03 12:26:34.012300 America/Los_Angeles';
+select cast(t as boolean) from tstz1;
+select cast(t as tinyint) from tstz1;
+select cast(t as smallint) from tstz1;
+select cast(t as int) from tstz1;
+select cast(t as bigint) from tstz1;
+select cast(t as string) from tstz1;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/timestamptz_2.q b/ql/src/test/queries/clientpositive/timestamptz_2.q
new file mode 100644
index 0000000..5ab8daf
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/timestamptz_2.q
@@ -0,0 +1,19 @@
+set hive.fetch.task.conversion=more;
+
+drop table tstz2;
+
+create table tstz2(t timestamp with time zone);
+
+insert into table tstz2 values
+ ('2005-04-03 03:01:00.04067 GMT-07:00'),('2005-01-03 02:01:00 GMT'),
+ ('2013-06-03 02:01:00.30547 GMT+01:00'),('2016-01-03 12:26:34.0123 GMT+08:00');
+
+select * from tstz2 where t='2005-01-03 02:01:00 GMT';
+
+select * from tstz2 where t>'2013-06-03 02:01:00.30547 GMT+01:00';
+
+select min(t),max(t) from tstz2;
+
+select t from tstz2 group by t order by t;
+
+select * from tstz2 a join tstz2 b on a.t=b.t order by a.t;
\ No newline at end of file
diff --git a/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out b/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
index acecbae..b85e7c5 100644
--- a/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
+++ b/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a binary)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToInteger with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(smallint) _FUNC_(string) _FUNC_(struct) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToInteger with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(smallint) _FUNC_(string) _FUNC_(struct) _FUNC_(timestamp) _FUNC_(timestamp with time zone) _FUNC_(tinyint) _FUNC_(void)
diff --git a/ql/src/test/results/clientnegative/invalid_cast_from_binary_2.q.out b/ql/src/test/results/clientnegative/invalid_cast_from_binary_2.q.out
index 41e1c80..6ff2526 100644
--- a/ql/src/test/results/clientnegative/invalid_cast_from_binary_2.q.out
+++ b/ql/src/test/results/clientnegative/invalid_cast_from_binary_2.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a binary)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToByte with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(void)
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToByte with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(timestamp with time zone) _FUNC_(void)
diff --git a/ql/src/test/results/clientnegative/invalid_cast_from_binary_3.q.out b/ql/src/test/results/clientnegative/invalid_cast_from_binary_3.q.out
index 23e3403..be40eef 100644
--- a/ql/src/test/results/clientnegative/invalid_cast_from_binary_3.q.out
+++ b/ql/src/test/results/clientnegative/invalid_cast_from_binary_3.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a binary)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToShort with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(int) _FUNC_(string) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToShort with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(int) _FUNC_(string) _FUNC_(timestamp) _FUNC_(timestamp with time zone) _FUNC_(tinyint) _FUNC_(void)
diff --git a/ql/src/test/results/clientnegative/invalid_cast_from_binary_4.q.out b/ql/src/test/results/clientnegative/invalid_cast_from_binary_4.q.out
index 3541ef6..30bd5b4 100644
--- a/ql/src/test/results/clientnegative/invalid_cast_from_binary_4.q.out
+++ b/ql/src/test/results/clientnegative/invalid_cast_from_binary_4.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a binary)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToLong with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToLong with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(float) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(timestamp with time zone) _FUNC_(tinyint) _FUNC_(void)
diff --git a/ql/src/test/results/clientnegative/invalid_cast_from_binary_5.q.out b/ql/src/test/results/clientnegative/invalid_cast_from_binary_5.q.out
index 177039c..539cebf 100644
--- a/ql/src/test/results/clientnegative/invalid_cast_from_binary_5.q.out
+++ b/ql/src/test/results/clientnegative/invalid_cast_from_binary_5.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a binary)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToFloat with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToFloat with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(timestamp with time zone) _FUNC_(tinyint) _FUNC_(void)
diff --git a/ql/src/test/results/clientnegative/invalid_cast_from_binary_6.q.out b/ql/src/test/results/clientnegative/invalid_cast_from_binary_6.q.out
index 668380f..27f4105 100644
--- a/ql/src/test/results/clientnegative/invalid_cast_from_binary_6.q.out
+++ b/ql/src/test/results/clientnegative/invalid_cast_from_binary_6.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a binary)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToDouble with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(float) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToDouble with (binary). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(float) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(timestamp with time zone) _FUNC_(tinyint) _FUNC_(void)
diff --git a/ql/src/test/results/clientnegative/serde_regex.q.out b/ql/src/test/results/clientnegative/serde_regex.q.out
index 7892bb2..02ebc9b 100644
--- a/ql/src/test/results/clientnegative/serde_regex.q.out
+++ b/ql/src/test/results/clientnegative/serde_regex.q.out
@@ -9,7 +9,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time TIMESTAMP,
+ `time` TIMESTAMP,
request STRING,
status INT,
size INT,
diff --git a/ql/src/test/results/clientnegative/serde_regex2.q.out b/ql/src/test/results/clientnegative/serde_regex2.q.out
index 1ceb387..a6d6b8f 100644
--- a/ql/src/test/results/clientnegative/serde_regex2.q.out
+++ b/ql/src/test/results/clientnegative/serde_regex2.q.out
@@ -9,7 +9,7 @@ PREHOOK: query: -- Mismatch between the number of matching groups and columns, t
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -28,7 +28,7 @@ POSTHOOK: query: -- Mismatch between the number of matching groups and columns,
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
@@ -59,7 +59,7 @@ POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@serde_regex
PREHOOK: query: -- raise an exception
-SELECT * FROM serde_regex ORDER BY time
+SELECT * FROM serde_regex ORDER BY `time`
PREHOOK: type: QUERY
PREHOOK: Input: default@serde_regex
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientnegative/serde_regex3.q.out b/ql/src/test/results/clientnegative/serde_regex3.q.out
index 028a24f..2a69e1c 100644
--- a/ql/src/test/results/clientnegative/serde_regex3.q.out
+++ b/ql/src/test/results/clientnegative/serde_regex3.q.out
@@ -9,7 +9,7 @@ PREHOOK: query: -- null input.regex, raise an exception
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size STRING,
diff --git a/ql/src/test/results/clientnegative/wrong_column_type.q.out b/ql/src/test/results/clientnegative/wrong_column_type.q.out
index 6ff90ea..5dd73eb 100644
--- a/ql/src/test/results/clientnegative/wrong_column_type.q.out
+++ b/ql/src/test/results/clientnegative/wrong_column_type.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: CREATE TABLE dest1(a float)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@dest1
-FAILED: NoMatchingMethodException No matching method for class org.apache.hadoop.hive.ql.udf.UDFToFloat with (array). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(tinyint) _FUNC_(void)
+FAILED: NoMatchingMethodException No matching method for class org.apache.hadoop.hive.ql.udf.UDFToFloat with (array). Possible choices: _FUNC_(bigint) _FUNC_(boolean) _FUNC_(decimal(38,18)) _FUNC_(double) _FUNC_(int) _FUNC_(smallint) _FUNC_(string) _FUNC_(timestamp) _FUNC_(timestamp with time zone) _FUNC_(tinyint) _FUNC_(void)
diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out
index 0111c94..ff141e6 100644
--- a/ql/src/test/results/clientpositive/create_like.q.out
+++ b/ql/src/test/results/clientpositive/create_like.q.out
@@ -614,13 +614,13 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@table5
POSTHOOK: Output: default@table5
PREHOOK: query: create table orc_table (
-time string)
+`time` string)
stored as ORC tblproperties ("orc.compress"="SNAPPY")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_table
POSTHOOK: query: create table orc_table (
-time string)
+`time` string)
stored as ORC tblproperties ("orc.compress"="SNAPPY")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
diff --git a/ql/src/test/results/clientpositive/join43.q.out b/ql/src/test/results/clientpositive/join43.q.out
index 127d5d0..b34a940 100644
--- a/ql/src/test/results/clientpositive/join43.q.out
+++ b/ql/src/test/results/clientpositive/join43.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table purchase_history (s string, product string, price double, time int)
+PREHOOK: query: create table purchase_history (s string, product string, price double, `time` int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@purchase_history
-POSTHOOK: query: create table purchase_history (s string, product string, price double, time int)
+POSTHOOK: query: create table purchase_history (s string, product string, price double, `time` int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@purchase_history
@@ -54,11 +54,11 @@ POSTHOOK: Lineage: purchase_history.price EXPRESSION [(values__tmp__table__4)val
POSTHOOK: Lineage: purchase_history.product SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
POSTHOOK: Lineage: purchase_history.s SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
POSTHOOK: Lineage: purchase_history.time EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-PREHOOK: query: create table cart_history (s string, cart_id int, time int)
+PREHOOK: query: create table cart_history (s string, cart_id int, `time` int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@cart_history
-POSTHOOK: query: create table cart_history (s string, cart_id int, time int)
+POSTHOOK: query: create table cart_history (s string, cart_id int, `time` int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@cart_history
@@ -128,11 +128,11 @@ POSTHOOK: Output: default@cart_history
POSTHOOK: Lineage: cart_history.cart_id EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
POSTHOOK: Lineage: cart_history.s SIMPLE [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
POSTHOOK: Lineage: cart_history.time EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-PREHOOK: query: create table events (s string, st2 string, n int, time int)
+PREHOOK: query: create table events (s string, st2 string, n int, `time` int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@events
-POSTHOOK: query: create table events (s string, st2 string, n int, time int)
+POSTHOOK: query: create table events (s string, st2 string, n int, `time` int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@events
@@ -213,15 +213,15 @@ select s
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
PREHOOK: type: QUERY
POSTHOOK: query: explain
@@ -229,15 +229,15 @@ select s
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -389,15 +389,15 @@ PREHOOK: query: select s
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
PREHOOK: type: QUERY
PREHOOK: Input: default@cart_history
@@ -408,15 +408,15 @@ POSTHOOK: query: select s
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cart_history
@@ -431,15 +431,15 @@ select *
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
PREHOOK: type: QUERY
POSTHOOK: query: explain
@@ -447,15 +447,15 @@ select *
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -609,15 +609,15 @@ PREHOOK: query: select *
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
PREHOOK: type: QUERY
PREHOOK: Input: default@cart_history
@@ -628,15 +628,15 @@ POSTHOOK: query: select *
from (
select last.*, action.st2, action.n
from (
- select purchase.s, purchase.time, max (mevt.time) as last_stage_time
+ select purchase.s, purchase.`time`, max (mevt.`time`) as last_stage_time
from (select * from purchase_history) purchase
join (select * from cart_history) mevt
on purchase.s = mevt.s
- where purchase.time > mevt.time
- group by purchase.s, purchase.time
+ where purchase.`time` > mevt.`time`
+ group by purchase.s, purchase.`time`
) last
join (select * from events) action
- on last.s = action.s and last.last_stage_time = action.time
+ on last.s = action.s and last.last_stage_time = action.`time`
) list
POSTHOOK: type: QUERY
POSTHOOK: Input: default@cart_history
diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out
index 7bebb0c..5a19ec9 100644
--- a/ql/src/test/results/clientpositive/serde_regex.q.out
+++ b/ql/src/test/results/clientpositive/serde_regex.q.out
@@ -3,7 +3,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size INT,
@@ -20,7 +20,7 @@ CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size INT,
@@ -51,7 +51,7 @@ PREHOOK: query: CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size INT,
@@ -69,7 +69,7 @@ POSTHOOK: query: CREATE TABLE serde_regex(
host STRING,
identity STRING,
`user` STRING,
- time STRING,
+ `time` STRING,
request STRING,
status STRING,
size INT,
@@ -99,21 +99,21 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" I
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@serde_regex
-PREHOOK: query: SELECT * FROM serde_regex ORDER BY time
+PREHOOK: query: SELECT * FROM serde_regex ORDER BY `time`
PREHOOK: type: QUERY
PREHOOK: Input: default@serde_regex
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM serde_regex ORDER BY time
+POSTHOOK: query: SELECT * FROM serde_regex ORDER BY `time`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@serde_regex
#### A masked pattern was here ####
127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 NULL NULL
127.0.0.1 - - [26/May/2009:00:00:00 +0000] "GET /someurl/?track=Blabla(Main) HTTP/1.1" 200 5864 - "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.65 Safari/525.19"
-PREHOOK: query: SELECT host, size, status, time from serde_regex ORDER BY time
+PREHOOK: query: SELECT host, size, status, `time` from serde_regex ORDER BY `time`
PREHOOK: type: QUERY
PREHOOK: Input: default@serde_regex
#### A masked pattern was here ####
-POSTHOOK: query: SELECT host, size, status, time from serde_regex ORDER BY time
+POSTHOOK: query: SELECT host, size, status, `time` from serde_regex ORDER BY `time`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@serde_regex
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/timestamptz.q.out b/ql/src/test/results/clientpositive/timestamptz.q.out
new file mode 100644
index 0000000..13e1c9d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/timestamptz.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: explain select cast('2005-01-03 02:01:00 GMT' as timestamp with time zone)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select cast('2005-01-03 02:01:00 GMT' as timestamp with time zone)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: _dummy_table
+ Row Limit Per Split: 1
+ Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 2005-01-03 02:01:00.0 GMT (type: timestamp with time zone)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: select cast('2005-01-03 02:01:00 GMT' as timestamp with time zone)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select cast('2005-01-03 02:01:00 GMT' as timestamp with time zone)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+2005-01-03 02:01:00 GMT
+PREHOOK: query: explain select cast('2016-01-03 12:26:34.0123 America/Los_Angeles' as timestamptz)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select cast('2016-01-03 12:26:34.0123 America/Los_Angeles' as timestamptz)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: _dummy_table
+ Row Limit Per Split: 1
+ Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 2016-01-03 12:26:34.0123 GMT-08:00 (type: timestamp with time zone)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ ListSink
+
+PREHOOK: query: select cast('2016-01-03 12:26:34.0123 America/Los_Angeles' as timestamptz)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select cast('2016-01-03 12:26:34.0123 America/Los_Angeles' as timestamptz)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+2016-01-03 12:26:34.0123 GMT-08:00
diff --git a/ql/src/test/results/clientpositive/timestamptz_1.q.out b/ql/src/test/results/clientpositive/timestamptz_1.q.out
new file mode 100644
index 0000000..a0063d0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/timestamptz_1.q.out
@@ -0,0 +1,264 @@
+PREHOOK: query: drop table tstz1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tstz1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tstz1(t timestamp with time zone)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tstz1
+POSTHOOK: query: create table tstz1(t timestamp with time zone)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tstz1
+PREHOOK: query: insert overwrite table tstz1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with time zone)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tstz1
+POSTHOOK: query: insert overwrite table tstz1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with time zone)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tstz1
+POSTHOOK: Lineage: tstz1.t EXPRESSION []
+PREHOOK: query: select cast(t as boolean) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as boolean) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+true
+PREHOOK: query: select cast(t as tinyint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as tinyint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-6
+PREHOOK: query: select cast(t as smallint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as smallint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-31750
+PREHOOK: query: select cast(t as int) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as int) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as bigint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as bigint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as string) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as string) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+2016-01-03 12:26:34 GMT-08:00
+PREHOOK: query: insert overwrite table tstz1 select '2016-01-03 12:26:34.1 America/Los_Angeles'
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tstz1
+POSTHOOK: query: insert overwrite table tstz1 select '2016-01-03 12:26:34.1 America/Los_Angeles'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tstz1
+POSTHOOK: Lineage: tstz1.t EXPRESSION []
+PREHOOK: query: select cast(t as boolean) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as boolean) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+true
+PREHOOK: query: select cast(t as tinyint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as tinyint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-6
+PREHOOK: query: select cast(t as smallint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as smallint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-31750
+PREHOOK: query: select cast(t as int) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as int) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as bigint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as bigint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as string) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as string) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+2016-01-03 12:26:34.1 GMT-08:00
+PREHOOK: query: insert overwrite table tstz1 select '2016-01-03 12:26:34.0123 America/Los_Angeles'
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tstz1
+POSTHOOK: query: insert overwrite table tstz1 select '2016-01-03 12:26:34.0123 America/Los_Angeles'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tstz1
+POSTHOOK: Lineage: tstz1.t EXPRESSION []
+PREHOOK: query: select cast(t as boolean) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as boolean) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+true
+PREHOOK: query: select cast(t as tinyint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as tinyint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-6
+PREHOOK: query: select cast(t as smallint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as smallint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-31750
+PREHOOK: query: select cast(t as int) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as int) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as bigint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as bigint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as string) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as string) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+2016-01-03 12:26:34.0123 GMT-08:00
+PREHOOK: query: insert overwrite table tstz1 select '2016-01-03 12:26:34.012300 America/Los_Angeles'
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tstz1
+POSTHOOK: query: insert overwrite table tstz1 select '2016-01-03 12:26:34.012300 America/Los_Angeles'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tstz1
+POSTHOOK: Lineage: tstz1.t EXPRESSION []
+PREHOOK: query: select cast(t as boolean) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as boolean) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+true
+PREHOOK: query: select cast(t as tinyint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as tinyint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-6
+PREHOOK: query: select cast(t as smallint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as smallint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+-31750
+PREHOOK: query: select cast(t as int) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as int) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as bigint) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as bigint) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+1451852794
+PREHOOK: query: select cast(t as string) from tstz1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(t as string) from tstz1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz1
+#### A masked pattern was here ####
+2016-01-03 12:26:34.0123 GMT-08:00
diff --git a/ql/src/test/results/clientpositive/timestamptz_2.q.out b/ql/src/test/results/clientpositive/timestamptz_2.q.out
new file mode 100644
index 0000000..8e056d2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/timestamptz_2.q.out
@@ -0,0 +1,76 @@
+PREHOOK: query: drop table tstz2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tstz2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tstz2(t timestamp with time zone)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tstz2
+POSTHOOK: query: create table tstz2(t timestamp with time zone)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tstz2
+PREHOOK: query: insert into table tstz2 values
+ ('2005-04-03 03:01:00.04067 GMT-07:00'),('2005-01-03 02:01:00 GMT'),
+ ('2013-06-03 02:01:00.30547 GMT+01:00'),('2016-01-03 12:26:34.0123 GMT+08:00')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@tstz2
+POSTHOOK: query: insert into table tstz2 values
+ ('2005-04-03 03:01:00.04067 GMT-07:00'),('2005-01-03 02:01:00 GMT'),
+ ('2013-06-03 02:01:00.30547 GMT+01:00'),('2016-01-03 12:26:34.0123 GMT+08:00')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@tstz2
+POSTHOOK: Lineage: tstz2.t EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select * from tstz2 where t='2005-01-03 02:01:00 GMT'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tstz2 where t='2005-01-03 02:01:00 GMT'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+2005-01-03 02:01:00 GMT
+PREHOOK: query: select * from tstz2 where t>'2013-06-03 02:01:00.30547 GMT+01:00'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tstz2 where t>'2013-06-03 02:01:00.30547 GMT+01:00'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+2016-01-03 12:26:34.0123 GMT+08:00
+PREHOOK: query: select min(t),max(t) from tstz2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+POSTHOOK: query: select min(t),max(t) from tstz2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+2005-01-03 02:01:00 GMT 2016-01-03 12:26:34.0123 GMT+08:00
+PREHOOK: query: select t from tstz2 group by t order by t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+POSTHOOK: query: select t from tstz2 group by t order by t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+2005-01-03 02:01:00 GMT
+2005-04-03 03:01:00.04067 GMT-07:00
+2013-06-03 02:01:00.30547 GMT+01:00
+2016-01-03 12:26:34.0123 GMT+08:00
+PREHOOK: query: select * from tstz2 a join tstz2 b on a.t=b.t order by a.t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tstz2 a join tstz2 b on a.t=b.t order by a.t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tstz2
+#### A masked pattern was here ####
+2005-01-03 02:01:00 GMT 2005-01-03 02:01:00 GMT
+2005-04-03 03:01:00.04067 GMT-07:00 2005-04-03 03:01:00.04067 GMT-07:00
+2013-06-03 02:01:00.30547 GMT+01:00 2013-06-03 02:01:00.30547 GMT+01:00
+2016-01-03 12:26:34.0123 GMT+08:00 2016-01-03 12:26:34.0123 GMT+08:00
diff --git a/serde/if/serde.thrift b/serde/if/serde.thrift
index 6caad36..81899ad 100644
--- a/serde/if/serde.thrift
+++ b/serde/if/serde.thrift
@@ -60,6 +60,7 @@ const string VARCHAR_TYPE_NAME = "varchar";
const string DATE_TYPE_NAME = "date";
const string DATETIME_TYPE_NAME = "datetime";
const string TIMESTAMP_TYPE_NAME = "timestamp";
+const string TIMESTAMPTZ_TYPE_NAME = "timestamp with time zone";
const string DECIMAL_TYPE_NAME = "decimal";
const string BINARY_TYPE_NAME = "binary";
const string INTERVAL_YEAR_MONTH_TYPE_NAME = "interval_year_month";
@@ -90,6 +91,7 @@ const set PrimitiveTypes = [
DATE_TYPE_NAME
DATETIME_TYPE_NAME
TIMESTAMP_TYPE_NAME
+ TIMESTAMPTZ_TYPE_NAME
INTERVAL_YEAR_MONTH_TYPE_NAME
INTERVAL_DAY_TIME_TYPE_NAME
DECIMAL_TYPE_NAME
diff --git a/serde/src/gen/thrift/gen-cpp/serde_constants.cpp b/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
index 3a675bf..7d55673 100644
--- a/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
+++ b/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
@@ -77,6 +77,8 @@ serdeConstants::serdeConstants() {
TIMESTAMP_TYPE_NAME = "timestamp";
+ TIMESTAMPTZ_TYPE_NAME = "timestamp with time zone";
+
DECIMAL_TYPE_NAME = "decimal";
BINARY_TYPE_NAME = "binary";
@@ -113,6 +115,7 @@ serdeConstants::serdeConstants() {
PrimitiveTypes.insert("date");
PrimitiveTypes.insert("datetime");
PrimitiveTypes.insert("timestamp");
+ PrimitiveTypes.insert("timestamp with time zone");
PrimitiveTypes.insert("interval_year_month");
PrimitiveTypes.insert("interval_day_time");
PrimitiveTypes.insert("decimal");
diff --git a/serde/src/gen/thrift/gen-cpp/serde_constants.h b/serde/src/gen/thrift/gen-cpp/serde_constants.h
index a5f33fb..ddf6e57 100644
--- a/serde/src/gen/thrift/gen-cpp/serde_constants.h
+++ b/serde/src/gen/thrift/gen-cpp/serde_constants.h
@@ -48,6 +48,7 @@ class serdeConstants {
std::string DATE_TYPE_NAME;
std::string DATETIME_TYPE_NAME;
std::string TIMESTAMP_TYPE_NAME;
+ std::string TIMESTAMPTZ_TYPE_NAME;
std::string DECIMAL_TYPE_NAME;
std::string BINARY_TYPE_NAME;
std::string INTERVAL_YEAR_MONTH_TYPE_NAME;
diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java
index 04ed8f5..1707ee7 100644
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java
@@ -102,6 +102,8 @@
public static final String TIMESTAMP_TYPE_NAME = "timestamp";
+ public static final String TIMESTAMPTZ_TYPE_NAME = "timestamp with time zone";
+
public static final String DECIMAL_TYPE_NAME = "decimal";
public static final String BINARY_TYPE_NAME = "binary";
@@ -140,6 +142,7 @@
PrimitiveTypes.add("date");
PrimitiveTypes.add("datetime");
PrimitiveTypes.add("timestamp");
+ PrimitiveTypes.add("timestamp with time zone");
PrimitiveTypes.add("interval_year_month");
PrimitiveTypes.add("interval_day_time");
PrimitiveTypes.add("decimal");
diff --git a/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php b/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
index 18c3991..13d5694 100644
--- a/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
+++ b/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
@@ -51,6 +51,7 @@ final class Constant extends \Thrift\Type\TConstant {
static protected $DATE_TYPE_NAME;
static protected $DATETIME_TYPE_NAME;
static protected $TIMESTAMP_TYPE_NAME;
+ static protected $TIMESTAMPTZ_TYPE_NAME;
static protected $DECIMAL_TYPE_NAME;
static protected $BINARY_TYPE_NAME;
static protected $INTERVAL_YEAR_MONTH_TYPE_NAME;
@@ -198,6 +199,10 @@ final class Constant extends \Thrift\Type\TConstant {
return "timestamp";
}
+ static protected function init_TIMESTAMPTZ_TYPE_NAME() {
+ return "timestamp with time zone";
+ }
+
static protected function init_DECIMAL_TYPE_NAME() {
return "decimal";
}
@@ -258,6 +263,7 @@ final class Constant extends \Thrift\Type\TConstant {
"date" => true,
"datetime" => true,
"timestamp" => true,
+ "timestamp with time zone" => true,
"interval_year_month" => true,
"interval_day_time" => true,
"decimal" => true,
diff --git a/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py b/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
index fafdc24..dac8bff 100644
--- a/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
+++ b/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
@@ -42,6 +42,7 @@
DATE_TYPE_NAME = "date"
DATETIME_TYPE_NAME = "datetime"
TIMESTAMP_TYPE_NAME = "timestamp"
+TIMESTAMPTZ_TYPE_NAME = "timestamp with time zone"
DECIMAL_TYPE_NAME = "decimal"
BINARY_TYPE_NAME = "binary"
INTERVAL_YEAR_MONTH_TYPE_NAME = "interval_year_month"
@@ -68,6 +69,7 @@
"date",
"datetime",
"timestamp",
+ "timestamp with time zone",
"interval_year_month",
"interval_day_time",
"decimal",
diff --git a/serde/src/gen/thrift/gen-rb/serde_constants.rb b/serde/src/gen/thrift/gen-rb/serde_constants.rb
index 0ce9f27..1665ac7 100644
--- a/serde/src/gen/thrift/gen-rb/serde_constants.rb
+++ b/serde/src/gen/thrift/gen-rb/serde_constants.rb
@@ -73,6 +73,8 @@ DATETIME_TYPE_NAME = %q"datetime"
TIMESTAMP_TYPE_NAME = %q"timestamp"
+TIMESTAMPTZ_TYPE_NAME = %q"timestamp with time zone"
+
DECIMAL_TYPE_NAME = %q"decimal"
BINARY_TYPE_NAME = %q"binary"
@@ -110,6 +112,7 @@ PrimitiveTypes = Set.new([
%q"date",
%q"datetime",
%q"timestamp",
+ %q"timestamp with time zone",
%q"interval_year_month",
%q"interval_day_time",
%q"decimal",
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java
index 7ffc964..f60fb53 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java
@@ -24,7 +24,6 @@
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.serde2.AbstractSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -43,6 +42,7 @@
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveIntervalDayTimeObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveIntervalYearMonthObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampTZObjectorInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
@@ -274,6 +274,13 @@ static void buildJSONString(StringBuilder sb, Object o, ObjectInspector oi, Stri
sb.append('"');
break;
}
+ case TIMESTAMPTZ: {
+ sb.append('"');
+ sb.append(((TimestampTZObjectorInspector) poi)
+ .getPrimitiveWritableObject(o));
+ sb.append('"');
+ break;
+ }
case BINARY: {
BytesWritable bw = ((BinaryObjectInspector) oi).getPrimitiveWritableObject(o);
Text txt = new Text();
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
index 5e119d7..1420c1e 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
@@ -47,9 +47,11 @@
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.HiveIntervalDayTimeWritable;
import org.apache.hadoop.hive.serde2.io.HiveIntervalYearMonthWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -68,6 +70,7 @@
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveIntervalDayTimeObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveIntervalYearMonthObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampTZObjectorInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
@@ -409,13 +412,12 @@ static Object deserialize(InputByteBuffer buffer, TypeInfo type,
case TIMESTAMP:
TimestampWritable t = (reuse == null ? new TimestampWritable() :
(TimestampWritable) reuse);
- byte[] bytes = new byte[TimestampWritable.BINARY_SORTABLE_LENGTH];
- for (int i = 0; i < bytes.length; i++) {
- bytes[i] = buffer.read(invert);
- }
- t.setBinarySortable(bytes, 0);
- return t;
+ return deserializeTimestampWritable(buffer, t, invert);
+ case TIMESTAMPTZ:
+ TimestampTZWritable ht = (reuse == null ? new TimestampTZWritable() :
+ (TimestampTZWritable) reuse);
+ return deserializeTimestampWritable(buffer, ht, invert);
case INTERVAL_YEAR_MONTH: {
HiveIntervalYearMonthWritable i = reuse == null ? new HiveIntervalYearMonthWritable()
@@ -784,6 +786,12 @@ static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi,
serializeTimestampWritable(buffer, t, invert);
return;
}
+ case TIMESTAMPTZ: {
+ TimestampTZObjectorInspector toi = (TimestampTZObjectorInspector) poi;
+ TimestampTZWritable t = toi.getPrimitiveWritableObject(o);
+ serializeTimestampWritable(buffer, t, invert);
+ return;
+ }
case INTERVAL_YEAR_MONTH: {
HiveIntervalYearMonthObjectInspector ioi = (HiveIntervalYearMonthObjectInspector) poi;
HiveIntervalYearMonth intervalYearMonth = ioi.getPrimitiveJavaObject(o);
@@ -959,13 +967,30 @@ public static void serializeDouble(ByteStream.Output buffer, double vd, boolean
writeByte(buffer, (byte) v, invert);
}
- public static void serializeTimestampWritable(ByteStream.Output buffer, TimestampWritable t, boolean invert) {
+ public static void serializeTimestampWritable(ByteStream.Output buffer,
+ TimestampWritableBase t, boolean invert) {
byte[] data = t.getBinarySortable();
for (int i = 0; i < data.length; i++) {
writeByte(buffer, data[i], invert);
}
}
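+ // Shared by TIMESTAMP and TIMESTAMPTZ: fills a byte array from the buffer
+ // (honoring sort-order inversion) and decodes it into t.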
+ public static TimestampWritableBase deserializeTimestampWritable(InputByteBuffer buffer,
+ TimestampWritableBase t, boolean invert, byte[] bytes) throws IOException {
+ for (int i = 0; i < bytes.length; i++) {
+ bytes[i] = buffer.read(invert);
+ }
+ t.setBinarySortable(bytes, 0);
+ return t;
+ }
+
+ public static TimestampWritableBase deserializeTimestampWritable(InputByteBuffer buffer,
+ TimestampWritableBase t, boolean invert) throws IOException {
+ byte[] bytes = new byte[t.binSortableLen()];
+ return deserializeTimestampWritable(buffer, t, invert, bytes);
+ }
+
public static void serializeHiveIntervalYearMonth(ByteStream.Output buffer,
HiveIntervalYearMonth intervalYearMonth, boolean invert) {
int totalMonths = intervalYearMonth.getTotalMonths();
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableDeserializeRead.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableDeserializeRead.java
index a7785b2..d666a31 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableDeserializeRead.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableDeserializeRead.java
@@ -229,7 +229,7 @@ public boolean readNextField() throws IOException {
case TIMESTAMP:
{
if (tempTimestampBytes == null) {
- tempTimestampBytes = new byte[TimestampWritable.BINARY_SORTABLE_LENGTH];
+ tempTimestampBytes = new byte[currentTimestampWritable.binSortableLen()];
}
final boolean invert = columnSortOrderIsDesc[fieldIndex];
for (int i = 0; i < tempTimestampBytes.length; i++) {
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampTZWritable.java b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampTZWritable.java
new file mode 100644
index 0000000..5ec275d
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampTZWritable.java
@@ -0,0 +1,84 @@
+package org.apache.hadoop.hive.serde2.io;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+
+import java.sql.Timestamp;
+
+/**
+ * Writable for TimestampTZ.
+ */
+public class TimestampTZWritable extends TimestampWritableBase {
+
+ public TimestampTZWritable() {
+ timestamp = new TimestampTZ(0, 0);
+ }
+
+ public TimestampTZWritable(byte[] bytes, int offset) {
+ timestamp = new TimestampTZ(0, 0);
+ set(bytes, offset);
+ }
+
+ public TimestampTZWritable(TimestampTZWritable t) {
+ this(t.getBytes(), 0);
+ }
+
+ public TimestampTZWritable(TimestampTZ t) {
+ timestamp = new TimestampTZ(0, 0);
+ set(t);
+ }
+
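+ // Maximum number of bytes needed to serialize this writable, timezone offset included.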
+ @Override
+ public int maxNumBytes() {
+ return 16;
+ }
+
+ @Override
+ public int binSortableLen() {
+ return 15;
+ }
+
+ @Override
+ public void set(Timestamp t) {
+ if (t != null) {
+ Preconditions.checkArgument(t.getClass().equals(TimestampTZ.class));
+ }
+ super.set(t);
+ }
+
+ @Override
+ public byte[] getBinarySortable() {
+ byte[] b = super.getBinarySortable();
+ Integer tzOffset = getTimezoneOffset();
+ Preconditions.checkArgument(tzOffset != null);
+ intToBytes(tzOffset ^ DECIMAL_OR_SECOND_VINT_FLAG, b, 11);
+ return b;
+ }
+
+ @Override
+ public void setBinarySortable(byte[] bytes, int binSortOffset) {
+ super.setBinarySortable(bytes, binSortOffset);
+ int tzOffset = bytesToInt(bytes, binSortOffset + 11) ^ DECIMAL_OR_SECOND_VINT_FLAG;
+ ((TimestampTZ) timestamp).setOffsetInMin(tzOffset);
+ }
+
+ public TimestampTZ getTimestamp() {
+ if (timestampEmpty) {
+ populateTimestamp();
+ }
+ return (TimestampTZ) timestamp;
+ }
+
+ public static TimestampTZ createTimestampTZ(byte[] bytes, int offset) {
+ TimestampTZ t = new TimestampTZ(0, 0);
+ TimestampWritableBase.setTimestamp(t, bytes, offset);
+ return t;
+ }
+
+}
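
The binary-sortable form produced here is 15 bytes: the base class writes the sign-flipped seven-byte seconds field and the four nanosecond bytes, and this class appends the timezone offset in minutes XOR'd with 0x80000000 (DECIMAL_OR_SECOND_VINT_FLAG), so that negative offsets order before positive ones when the bytes are compared unsigned. A small sketch, assuming the TimestampTZ(long, int offsetInMinutes) constructor used above:

    TimestampTZWritable w = new TimestampTZWritable(new TimestampTZ(0, 480)); // epoch at GMT+08:00
    byte[] sortable = w.getBinarySortable();
    // sortable.length == 15: [0..6] seconds (sign-flipped), [7..10] nanos,
    // [11..14] offset minutes ^ 0x80000000
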
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java
index bbccc7f..02d23bb 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -17,90 +17,21 @@
*/
package org.apache.hadoop.hive.serde2.io;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.sql.Timestamp;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import com.google.common.base.Preconditions;
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.ql.util.TimestampUtils;
-import org.apache.hadoop.hive.serde2.ByteStream.RandomAccessOutput;
-import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
-import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils.VInt;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableUtils;
+import java.sql.Timestamp;
/**
- * TimestampWritable
- * Writable equivalent of java.sq.Timestamp
- *
- * Timestamps are of the format
- * YYYY-MM-DD HH:MM:SS.[fff...]
- *
- * We encode Unix timestamp in seconds in 4 bytes, using the MSB to signify
- * whether the timestamp has a fractional portion.
- *
- * The fractional portion is reversed, and encoded as a VInt
- * so timestamps with less precision use fewer bytes.
- *
- * 0.1 -> 1
- * 0.01 -> 10
- * 0.001 -> 100
- *
+ * Writable for Timestamp.
*/
-public class TimestampWritable implements WritableComparable<TimestampWritable> {
-
- static final public byte[] nullBytes = {0x0, 0x0, 0x0, 0x0};
-
- private static final int DECIMAL_OR_SECOND_VINT_FLAG = 0x80000000;
- private static final int LOWEST_31_BITS_OF_SEC_MASK = 0x7fffffff;
-
- private static final long SEVEN_BYTE_LONG_SIGN_FLIP = 0xff80L << 48;
-
-
- /** The maximum number of bytes required for a TimestampWritable */
- public static final int MAX_BYTES = 13;
-
- public static final int BINARY_SORTABLE_LENGTH = 11;
-
- private static final ThreadLocal<DateFormat> threadLocalDateFormat =
- new ThreadLocal<DateFormat>() {
- @Override
- protected DateFormat initialValue() {
- return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- }
- };
-
- private Timestamp timestamp = new Timestamp(0);
-
- /**
- * true if data is stored in timestamp field rather than byte arrays.
- * allows for lazy conversion to bytes when necessary
- * false otherwise
- */
- private boolean bytesEmpty;
- private boolean timestampEmpty;
-
- /* Allow use of external byte[] for efficiency */
- private byte[] currentBytes;
- private final byte[] internalBytes = new byte[MAX_BYTES];
- private byte[] externalBytes;
- private int offset;
+public class TimestampWritable extends TimestampWritableBase {
- /* Constructors */
public TimestampWritable() {
- bytesEmpty = false;
- currentBytes = internalBytes;
- offset = 0;
-
- clearTimestamp();
+ timestamp = new Timestamp(0);
}
public TimestampWritable(byte[] bytes, int offset) {
+ timestamp = new Timestamp(0);
set(bytes, offset);
}
@@ -109,531 +40,48 @@ public TimestampWritable(TimestampWritable t) {
}
public TimestampWritable(Timestamp t) {
+ timestamp = new Timestamp(0);
set(t);
}
- public void set(byte[] bytes, int offset) {
- externalBytes = bytes;
- this.offset = offset;
- bytesEmpty = false;
- currentBytes = externalBytes;
-
- clearTimestamp();
- }
-
- public void setTime(long time) {
- timestamp.setTime(time);
- bytesEmpty = true;
- timestampEmpty = false;
- }
-
+ @Override
public void set(Timestamp t) {
- if (t == null) {
- timestamp.setTime(0);
- timestamp.setNanos(0);
- return;
- }
- this.timestamp = t;
- bytesEmpty = true;
- timestampEmpty = false;
- }
-
- public void set(TimestampWritable t) {
- if (t.bytesEmpty) {
- set(t.getTimestamp());
- return;
- }
- if (t.currentBytes == t.externalBytes) {
- set(t.currentBytes, t.offset);
- } else {
- set(t.currentBytes, 0);
- }
- }
-
- public static void updateTimestamp(Timestamp timestamp, long secondsAsMillis, int nanos) {
- ((Date) timestamp).setTime(secondsAsMillis);
- timestamp.setNanos(nanos);
- }
-
- public void setInternal(long secondsAsMillis, int nanos) {
-
- // This is our way of documenting that we are MUTATING the contents of
- // this writable's internal timestamp.
- updateTimestamp(timestamp, secondsAsMillis, nanos);
-
- bytesEmpty = true;
- timestampEmpty = false;
- }
-
- private void clearTimestamp() {
- timestampEmpty = true;
- }
-
- public void writeToByteStream(RandomAccessOutput byteStream) {
- checkBytes();
- byteStream.write(currentBytes, offset, getTotalLength());
- }
-
- /**
- *
- * @return seconds corresponding to this TimestampWritable
- */
- public long getSeconds() {
- if (!timestampEmpty) {
- return TimestampUtils.millisToSeconds(timestamp.getTime());
- } else if (!bytesEmpty) {
- return TimestampWritable.getSeconds(currentBytes, offset);
- } else {
- throw new IllegalStateException("Both timestamp and bytes are empty");
- }
- }
-
- /**
- *
- * @return nanoseconds in this TimestampWritable
- */
- public int getNanos() {
- if (!timestampEmpty) {
- return timestamp.getNanos();
- } else if (!bytesEmpty) {
- return hasDecimalOrSecondVInt() ?
- TimestampWritable.getNanos(currentBytes, offset + 4) : 0;
- } else {
- throw new IllegalStateException("Both timestamp and bytes are empty");
- }
- }
-
- /**
- * @return length of serialized TimestampWritable data. As a side effect, populates the internal
- * byte array if empty.
- */
- int getTotalLength() {
- checkBytes();
- return getTotalLength(currentBytes, offset);
- }
-
- public static int getTotalLength(byte[] bytes, int offset) {
- int len = 4;
- if (hasDecimalOrSecondVInt(bytes[offset])) {
- int firstVIntLen = WritableUtils.decodeVIntSize(bytes[offset + 4]);
- len += firstVIntLen;
- if (hasSecondVInt(bytes[offset + 4])) {
- len += WritableUtils.decodeVIntSize(bytes[offset + 4 + firstVIntLen]);
- }
- }
- return len;
- }
-
- public Timestamp getTimestamp() {
- if (timestampEmpty) {
- populateTimestamp();
- }
- return timestamp;
- }
-
- /**
- * Used to create copies of objects
- * @return a copy of the internal TimestampWritable byte[]
- */
- public byte[] getBytes() {
- checkBytes();
-
- int len = getTotalLength();
- byte[] b = new byte[len];
-
- System.arraycopy(currentBytes, offset, b, 0, len);
- return b;
- }
-
- /**
- * @return byte[] representation of TimestampWritable that is binary
- * sortable (7 bytes for seconds, 4 bytes for nanoseconds)
- */
- public byte[] getBinarySortable() {
- byte[] b = new byte[BINARY_SORTABLE_LENGTH];
- int nanos = getNanos();
- // We flip the highest-order bit of the seven-byte representation of seconds to make negative
- // values come before positive ones.
- long seconds = getSeconds() ^ SEVEN_BYTE_LONG_SIGN_FLIP;
- sevenByteLongToBytes(seconds, b, 0);
- intToBytes(nanos, b, 7);
- return b;
- }
-
- /**
- * Given a byte[] that has binary sortable data, initialize the internal
- * structures to hold that data
- * @param bytes the byte array that holds the binary sortable representation
- * @param binSortOffset offset of the binary-sortable representation within the buffer.
- */
- public void setBinarySortable(byte[] bytes, int binSortOffset) {
- // Flip the sign bit (and unused bits of the high-order byte) of the seven-byte long back.
- long seconds = readSevenByteLong(bytes, binSortOffset) ^ SEVEN_BYTE_LONG_SIGN_FLIP;
- int nanos = bytesToInt(bytes, binSortOffset + 7);
- int firstInt = (int) seconds;
- boolean hasSecondVInt = seconds < 0 || seconds > Integer.MAX_VALUE;
- if (nanos != 0 || hasSecondVInt) {
- firstInt |= DECIMAL_OR_SECOND_VINT_FLAG;
- } else {
- firstInt &= LOWEST_31_BITS_OF_SEC_MASK;
- }
-
- intToBytes(firstInt, internalBytes, 0);
- setNanosBytes(nanos, internalBytes, 4, hasSecondVInt);
- if (hasSecondVInt) {
- LazyBinaryUtils.writeVLongToByteArray(internalBytes,
- 4 + WritableUtils.decodeVIntSize(internalBytes[4]),
- seconds >> 31);
- }
-
- currentBytes = internalBytes;
- this.offset = 0;
- }
-
- /**
- * The data of TimestampWritable can be stored either in a byte[]
- * or in a Timestamp object. Calling this method ensures that the byte[]
- * is populated from the Timestamp object if previously empty.
- */
- private void checkBytes() {
- if (bytesEmpty) {
- // Populate byte[] from Timestamp
- convertTimestampToBytes(timestamp, internalBytes, 0);
- offset = 0;
- currentBytes = internalBytes;
- bytesEmpty = false;
+ if (t != null) {
+ Preconditions.checkArgument(t.getClass().equals(Timestamp.class));
}
- }
-
- /**
- *
- * @return double representation of the timestamp, accurate to nanoseconds
- */
- public double getDouble() {
- double seconds, nanos;
- if (bytesEmpty) {
- seconds = TimestampUtils.millisToSeconds(timestamp.getTime());
- nanos = timestamp.getNanos();
- } else {
- seconds = getSeconds();
- nanos = getNanos();
- }
- return seconds + nanos / 1000000000;
- }
-
- public static long getLong(Timestamp timestamp) {
- return timestamp.getTime() / 1000;
- }
-
- public void readFields(DataInput in) throws IOException {
- in.readFully(internalBytes, 0, 4);
- if (TimestampWritable.hasDecimalOrSecondVInt(internalBytes[0])) {
- in.readFully(internalBytes, 4, 1);
- int len = (byte) WritableUtils.decodeVIntSize(internalBytes[4]);
- if (len > 1) {
- in.readFully(internalBytes, 5, len-1);
- }
-
- long vlong = LazyBinaryUtils.readVLongFromByteArray(internalBytes, 4);
- if (vlong < -1000000000 || vlong > 999999999) {
- throw new IOException(
- "Invalid first vint value (encoded nanoseconds) of a TimestampWritable: " + vlong +
- ", expected to be between -1000000000 and 999999999.");
- // Note that -1000000000 is a valid value corresponding to a nanosecond timestamp
- // of 999999999, because if the second VInt is present, we use the value
- // (-reversedNanoseconds - 1) as the second VInt.
- }
- if (vlong < 0) {
- // This indicates there is a second VInt containing the additional bits of the seconds
- // field.
- in.readFully(internalBytes, 4 + len, 1);
- int secondVIntLen = (byte) WritableUtils.decodeVIntSize(internalBytes[4 + len]);
- if (secondVIntLen > 1) {
- in.readFully(internalBytes, 5 + len, secondVIntLen - 1);
- }
- }
- }
- currentBytes = internalBytes;
- this.offset = 0;
- }
-
- public void write(DataOutput out) throws IOException {
- checkBytes();
- out.write(currentBytes, offset, getTotalLength());
- }
-
- public int compareTo(TimestampWritable t) {
- checkBytes();
- long s1 = this.getSeconds();
- long s2 = t.getSeconds();
- if (s1 == s2) {
- int n1 = this.getNanos();
- int n2 = t.getNanos();
- if (n1 == n2) {
- return 0;
- }
- return n1 - n2;
- } else {
- return s1 < s2 ? -1 : 1;
- }
- }
-
- @Override
- public boolean equals(Object o) {
- return compareTo((TimestampWritable) o) == 0;
+ super.set(t);
}
@Override
- public String toString() {
- if (timestampEmpty) {
- populateTimestamp();
- }
-
- String timestampString = timestamp.toString();
- if (timestampString.length() > 19) {
- if (timestampString.length() == 21) {
- if (timestampString.substring(19).compareTo(".0") == 0) {
- return threadLocalDateFormat.get().format(timestamp);
- }
- }
- return threadLocalDateFormat.get().format(timestamp) + timestampString.substring(19);
- }
-
- return threadLocalDateFormat.get().format(timestamp);
+ public int maxNumBytes() {
+ return 13;
}
@Override
- public int hashCode() {
- long seconds = getSeconds();
- seconds <<= 30; // the nanosecond part fits in 30 bits
- seconds |= getNanos();
- return (int) ((seconds >>> 32) ^ seconds);
- }
-
- private void populateTimestamp() {
- long seconds = getSeconds();
- int nanos = getNanos();
- timestamp.setTime(seconds * 1000);
- timestamp.setNanos(nanos);
- }
-
- /** Static methods **/
-
- /**
- * Gets seconds stored as integer at bytes[offset]
- * @param bytes
- * @param offset
- * @return the number of seconds
- */
- public static long getSeconds(byte[] bytes, int offset) {
- int lowest31BitsOfSecondsAndFlag = bytesToInt(bytes, offset);
- if (lowest31BitsOfSecondsAndFlag >= 0 || // the "has decimal or second VInt" flag is not set
- !hasSecondVInt(bytes[offset + 4])) {
- // The entire seconds field is stored in the first 4 bytes.
- return lowest31BitsOfSecondsAndFlag & LOWEST_31_BITS_OF_SEC_MASK;
- }
-
- // We compose the seconds field from two parts. The lowest 31 bits come from the first four
- // bytes. The higher-order bits come from the second VInt that follows the nanos field.
- return ((long) (lowest31BitsOfSecondsAndFlag & LOWEST_31_BITS_OF_SEC_MASK)) |
- (LazyBinaryUtils.readVLongFromByteArray(bytes,
- offset + 4 + WritableUtils.decodeVIntSize(bytes[offset + 4])) << 31);
- }
-
- public static int getNanos(byte[] bytes, int offset) {
- VInt vInt = LazyBinaryUtils.threadLocalVInt.get();
- LazyBinaryUtils.readVInt(bytes, offset, vInt);
- int val = vInt.value;
- if (val < 0) {
- // This means there is a second VInt present that specifies additional bits of the timestamp.
- // The reversed nanoseconds value is still encoded in this VInt.
- val = -val - 1;
- }
- int len = (int) Math.floor(Math.log10(val)) + 1;
-
- // Reverse the value
- int tmp = 0;
- while (val != 0) {
- tmp *= 10;
- tmp += val % 10;
- val /= 10;
- }
- val = tmp;
-
- if (len < 9) {
- val *= Math.pow(10, 9 - len);
- }
- return val;
- }
-
- /**
- * Writes a Timestamp's serialized value to byte array b at the given offset
- * @param t to convert to bytes
- * @param b destination byte array
- * @param offset destination offset in the byte array
- */
- public static void convertTimestampToBytes(Timestamp t, byte[] b,
- int offset) {
- long millis = t.getTime();
- int nanos = t.getNanos();
-
- long seconds = TimestampUtils.millisToSeconds(millis);
- boolean hasSecondVInt = seconds < 0 || seconds > Integer.MAX_VALUE;
- boolean hasDecimal = setNanosBytes(nanos, b, offset+4, hasSecondVInt);
-
- int firstInt = (int) seconds;
- if (hasDecimal || hasSecondVInt) {
- firstInt |= DECIMAL_OR_SECOND_VINT_FLAG;
- } else {
- firstInt &= LOWEST_31_BITS_OF_SEC_MASK;
- }
- intToBytes(firstInt, b, offset);
-
- if (hasSecondVInt) {
- LazyBinaryUtils.writeVLongToByteArray(b,
- offset + 4 + WritableUtils.decodeVIntSize(b[offset + 4]),
- seconds >> 31);
- }
+ public int binSortableLen() {
+ return 11;
}
/**
- * Given an integer representing nanoseconds, write its serialized
- * value to the byte array b at offset
- *
- * @param nanos
- * @param b
- * @param offset
- * @return
- */
- private static boolean setNanosBytes(int nanos, byte[] b, int offset, boolean hasSecondVInt) {
- int decimal = 0;
- if (nanos != 0) {
- int counter = 0;
- while (counter < 9) {
- decimal *= 10;
- decimal += nanos % 10;
- nanos /= 10;
- counter++;
- }
- }
-
- if (hasSecondVInt || decimal != 0) {
- // We use the sign of the reversed-nanoseconds field to indicate that there is a second VInt
- // present.
- LazyBinaryUtils.writeVLongToByteArray(b, offset, hasSecondVInt ? (-decimal - 1) : decimal);
- }
- return decimal != 0;
- }
-
- public HiveDecimal getHiveDecimal() {
- if (timestampEmpty) {
- populateTimestamp();
- }
- return getHiveDecimal(timestamp);
- }
-
- public static HiveDecimal getHiveDecimal(Timestamp timestamp) {
- // The BigDecimal class recommends not converting directly from double to BigDecimal,
- // so we convert through a string...
- Double timestampDouble = TimestampUtils.getDouble(timestamp);
- HiveDecimal result = HiveDecimal.create(timestampDouble.toString());
- return result;
- }
-
-
- /**
* Converts the time in seconds or milliseconds to a timestamp.
* @param time time in seconds or in milliseconds
* @return the timestamp
*/
public static Timestamp longToTimestamp(long time, boolean intToTimestampInSeconds) {
- // If the time is in seconds, converts it to milliseconds first.
- return new Timestamp(intToTimestampInSeconds ? time * 1000 : time);
- }
-
- public static void setTimestamp(Timestamp t, byte[] bytes, int offset) {
- long seconds = getSeconds(bytes, offset);
- t.setTime(seconds * 1000);
- if (hasDecimalOrSecondVInt(bytes[offset])) {
- t.setNanos(getNanos(bytes, offset + 4));
- } else {
- t.setNanos(0);
- }
+ // If the time is in seconds, converts it to milliseconds first.
+ return new Timestamp(intToTimestampInSeconds ? time * 1000 : time);
}
public static Timestamp createTimestamp(byte[] bytes, int offset) {
Timestamp t = new Timestamp(0);
- TimestampWritable.setTimestamp(t, bytes, offset);
+ TimestampWritableBase.setTimestamp(t, bytes, offset);
return t;
}
- private static boolean hasDecimalOrSecondVInt(byte b) {
- return (b >> 7) != 0;
- }
-
- private static boolean hasSecondVInt(byte b) {
- return WritableUtils.isNegativeVInt(b);
- }
-
- private final boolean hasDecimalOrSecondVInt() {
- return hasDecimalOrSecondVInt(currentBytes[offset]);
- }
-
- public final boolean hasDecimal() {
- return hasDecimalOrSecondVInt() || currentBytes[offset + 4] != -1;
- // If the first byte of the VInt is -1, the VInt itself is -1, indicating that there is a
- // second VInt but the nanoseconds field is actually 0.
- }
-
- /**
- * Writes value into dest at offset
- * @param value
- * @param dest
- * @param offset
- */
- private static void intToBytes(int value, byte[] dest, int offset) {
- dest[offset] = (byte) ((value >> 24) & 0xFF);
- dest[offset+1] = (byte) ((value >> 16) & 0xFF);
- dest[offset+2] = (byte) ((value >> 8) & 0xFF);
- dest[offset+3] = (byte) (value & 0xFF);
- }
-
- /**
- * Writes value into dest at offset as a seven-byte
- * serialized long number.
- */
- static void sevenByteLongToBytes(long value, byte[] dest, int offset) {
- dest[offset] = (byte) ((value >> 48) & 0xFF);
- dest[offset+1] = (byte) ((value >> 40) & 0xFF);
- dest[offset+2] = (byte) ((value >> 32) & 0xFF);
- dest[offset+3] = (byte) ((value >> 24) & 0xFF);
- dest[offset+4] = (byte) ((value >> 16) & 0xFF);
- dest[offset+5] = (byte) ((value >> 8) & 0xFF);
- dest[offset+6] = (byte) (value & 0xFF);
- }
-
- /**
- *
- * @param bytes
- * @param offset
- * @return integer represented by the four bytes in bytes
- * beginning at offset
- */
- private static int bytesToInt(byte[] bytes, int offset) {
- return ((0xFF & bytes[offset]) << 24)
- | ((0xFF & bytes[offset+1]) << 16)
- | ((0xFF & bytes[offset+2]) << 8)
- | (0xFF & bytes[offset+3]);
- }
-
- static long readSevenByteLong(byte[] bytes, int offset) {
- // We need to shift everything 8 bits left and then shift back to populate the sign field.
- return (((0xFFL & bytes[offset]) << 56)
- | ((0xFFL & bytes[offset+1]) << 48)
- | ((0xFFL & bytes[offset+2]) << 40)
- | ((0xFFL & bytes[offset+3]) << 32)
- | ((0xFFL & bytes[offset+4]) << 24)
- | ((0xFFL & bytes[offset+5]) << 16)
- | ((0xFFL & bytes[offset+6]) << 8)) >> 8;
+ public Timestamp getTimestamp() {
+ if (timestampEmpty) {
+ populateTimestamp();
+ }
+ return timestamp;
}
}
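
TimestampWritable is now a thin subclass: it pins the buffer sizes (13 bytes maximum, 11 binary-sortable) and rejects Timestamp subclasses in set(), so a TimestampTZ can no longer be stored in a plain TimestampWritable by accident. A sketch of the new contract, assuming TimestampTZ extends java.sql.Timestamp as the instanceof checks in the base class imply:

    TimestampWritable tw = new TimestampWritable(new Timestamp(0));
    tw.set(new Timestamp(1000));   // fine: exact class match
    tw.set(new TimestampTZ(0, 0)); // throws IllegalArgumentException
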
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritableBase.java b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritableBase.java
new file mode 100644
index 0000000..7be2d37
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritableBase.java
@@ -0,0 +1,667 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.sql.Timestamp;
+import java.util.Date;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.ql.util.TimestampUtils;
+import org.apache.hadoop.hive.serde2.ByteStream.RandomAccessOutput;
+import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
+import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils.VInt;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableUtils;
+
+/**
+ * TimestampWritableBase
+ * Writable equivalent of java.sql.Timestamp
+ *
+ * Timestamps are of the format
+ * YYYY-MM-DD HH:MM:SS.[fff...]
+ *
+ * We encode Unix timestamp in seconds in 4 bytes, using the MSB to signify
+ * whether the timestamp has a fractional portion.
+ *
+ * The fractional portion is reversed, and encoded as a VInt
+ * so timestamps with less precision use fewer bytes.
+ *
+ * 0.1 -> 1
+ * 0.01 -> 10
+ * 0.001 -> 100
+ *
+ */
+public abstract class TimestampWritableBase implements WritableComparable<TimestampWritableBase> {
+
+ static final public byte[] nullBytes = {0x0, 0x0, 0x0, 0x0};
+
+ protected static final int DECIMAL_OR_SECOND_VINT_FLAG = 0x80000000;
+ private static final int LOWEST_31_BITS_OF_SEC_MASK = 0x7fffffff;
+
+ private static final long SEVEN_BYTE_LONG_SIGN_FLIP = 0xff80L << 48;
+
+ private static final int TIMEZONE_MASK = 1 << 30;
+
+
+ /** The maximum number of bytes required for a TimestampWritableBase */
+ public abstract int maxNumBytes();
+
+ public abstract int binSortableLen();
+
+ protected Timestamp timestamp;
+
+ /**
+ * true if data is stored in timestamp field rather than byte arrays.
+ * allows for lazy conversion to bytes when necessary
+ * false otherwise
+ */
+ protected boolean bytesEmpty;
+ protected boolean timestampEmpty;
+
+ /* Allow use of external byte[] for efficiency */
+ private byte[] currentBytes;
+ private final byte[] internalBytes = new byte[maxNumBytes()]; // runs during base-class construction, so overrides must return a constant
+ private byte[] externalBytes;
+ private int offset;
+
+ /* Constructors */
+ protected TimestampWritableBase() {
+ bytesEmpty = false;
+ currentBytes = internalBytes;
+ offset = 0;
+
+ clearTimestamp();
+ }
+
+ public void set(byte[] bytes, int offset) {
+ externalBytes = bytes;
+ this.offset = offset;
+ bytesEmpty = false;
+ currentBytes = externalBytes;
+
+ clearTimestamp();
+ }
+
+ public void setTime(long time) {
+ timestamp.setTime(time);
+ bytesEmpty = true;
+ timestampEmpty = false;
+ }
+
+ public void set(Timestamp t) {
+ if (t == null) {
+ timestamp.setTime(0);
+ timestamp.setNanos(0);
+ if (timestamp instanceof TimestampTZ) {
+ ((TimestampTZ) timestamp).setOffsetInMin(0);
+ }
+ return;
+ }
+ this.timestamp = t;
+ bytesEmpty = true;
+ timestampEmpty = false;
+ }
+
+ public void set(TimestampWritableBase t) {
+ t.checkBytes();
+ if (t.currentBytes == t.externalBytes) {
+ set(t.currentBytes, t.offset);
+ } else {
+ set(t.currentBytes, 0);
+ }
+ }
+
+ private static void updateTimestamp(Timestamp timestamp, long secondsAsMillis, int nanos) {
+ ((Date) timestamp).setTime(secondsAsMillis);
+ timestamp.setNanos(nanos);
+ }
+
+ public void setInternal(long secondsAsMillis, int nanos) {
+
+ // This is our way of documenting that we are MUTATING the contents of
+ // this writable's internal timestamp.
+ updateTimestamp(timestamp, secondsAsMillis, nanos);
+
+ bytesEmpty = true;
+ timestampEmpty = false;
+ }
+
+ private void clearTimestamp() {
+ timestampEmpty = true;
+ }
+
+ public void writeToByteStream(RandomAccessOutput byteStream) {
+ checkBytes();
+ byteStream.write(currentBytes, offset, getTotalLength());
+ }
+
+ /**
+ *
+ * @return seconds corresponding to this TimestampWritableBase
+ */
+ public long getSeconds() {
+ if (!timestampEmpty) {
+ return TimestampUtils.millisToSeconds(timestamp.getTime());
+ } else if (!bytesEmpty) {
+ return getSeconds(currentBytes, offset);
+ } else {
+ throw new IllegalStateException("Both timestamp and bytes are empty");
+ }
+ }
+
+ /**
+ *
+ * @return nanoseconds in this TimestampWritableBase
+ */
+ public int getNanos() {
+ if (!timestampEmpty) {
+ return timestamp.getNanos();
+ } else if (!bytesEmpty) {
+ return hasDecimalOrSecondVInt() ?
+ getNanos(currentBytes, offset + 4) : 0;
+ } else {
+ throw new IllegalStateException("Both timestamp and bytes are empty");
+ }
+ }
+
+ protected Integer getTimezoneOffset() {
+ if (!timestampEmpty) {
+ return timestamp instanceof TimestampTZ ?
+ ((TimestampTZ) timestamp).getOffsetInMin() : null;
+ } else if (!bytesEmpty) {
+ return hasDecimalOrSecondVInt() ? getTimezoneOffset(currentBytes, offset + 4) : null;
+ } else {
+ throw new IllegalStateException("Both timestamp and bytes are empty");
+ }
+ }
+
+ // offset should point to the start of decimal field
+ private static Integer getTimezoneOffset(byte[] bytes, final int offset) {
+ if (hasTimezoneOffset(bytes, offset)) {
+ int pos = offset + WritableUtils.decodeVIntSize(bytes[offset]);
+ // skip the 2nd VInt
+ if (hasSecondVInt(bytes[offset])) {
+ pos += WritableUtils.decodeVIntSize(bytes[pos]);
+ }
+ return readVInt(bytes, pos);
+ }
+ return null;
+ }
+
+ private static boolean hasTimezoneOffset(byte[] bytes, int offset) {
+ int val = readVInt(bytes, offset);
+ return (val >= 0 && (val & TIMEZONE_MASK) != 0) ||
+ (val < 0 && (val & TIMEZONE_MASK) == 0);
+ }
+
+ /**
+ * @return length of serialized TimestampWritableBase data. As a side effect, populates the internal
+ * byte array if empty.
+ */
+ int getTotalLength() {
+ checkBytes();
+ return getTotalLength(currentBytes, offset);
+ }
+
+ public static int getTotalLength(byte[] bytes, int offset) {
+ int pos = offset + 4;
+ if (hasDecimalOrSecondVInt(bytes[offset])) {
+ boolean hasSecondVInt = hasSecondVInt(bytes[pos]);
+ boolean hasTimezoneOffset = hasTimezoneOffset(bytes, pos);
+ pos += WritableUtils.decodeVIntSize(bytes[pos]);
+ if (hasSecondVInt) {
+ pos += WritableUtils.decodeVIntSize(bytes[pos]);
+ }
+ if (hasTimezoneOffset) {
+ pos += WritableUtils.decodeVIntSize(bytes[pos]);
+ }
+ }
+ return pos - offset;
+ }
+
+ /**
+ * Used to create copies of objects
+ * @return a copy of the internal TimestampWritableBase byte[]
+ */
+ public byte[] getBytes() {
+ checkBytes();
+
+ int len = getTotalLength();
+ byte[] b = new byte[len];
+
+ System.arraycopy(currentBytes, offset, b, 0, len);
+ return b;
+ }
+
+ /**
+ * @return byte[] representation of TimestampWritableBase that is binary sortable
+ * (7 bytes for seconds, 4 bytes for nanoseconds; timezone-aware subclasses append 4 more for the offset)
+ */
+ public byte[] getBinarySortable() {
+ byte[] b = new byte[binSortableLen()];
+ int nanos = getNanos();
+ // We flip the highest-order bit of the seven-byte representation of seconds to make negative
+ // values come before positive ones.
+ long seconds = getSeconds() ^ SEVEN_BYTE_LONG_SIGN_FLIP;
+ sevenByteLongToBytes(seconds, b, 0);
+ intToBytes(nanos, b, 7);
+ return b;
+ }
+
+ /**
+ * Given a byte[] that has binary sortable data, initialize the internal
+ * structures to hold that data
+ * @param bytes the byte array that holds the binary sortable representation
+ * @param binSortOffset offset of the binary-sortable representation within the buffer.
+ */
+ public void setBinarySortable(byte[] bytes, int binSortOffset) {
+ // Flip the sign bit (and unused bits of the high-order byte) of the seven-byte long back.
+ long seconds = readSevenByteLong(bytes, binSortOffset) ^ SEVEN_BYTE_LONG_SIGN_FLIP;
+ int nanos = bytesToInt(bytes, binSortOffset + 7);
+ timestamp.setTime(seconds * 1000);
+ timestamp.setNanos(nanos);
+ timestampEmpty = false;
+ bytesEmpty = true;
+ }
+
+ /**
+ * The data of TimestampWritableBase can be stored either in a byte[]
+ * or in a Timestamp object. Calling this method ensures that the byte[]
+ * is populated from the Timestamp object if previously empty.
+ */
+ private void checkBytes() {
+ if (bytesEmpty) {
+ // Populate byte[] from Timestamp
+ populateBytes();
+ offset = 0;
+ currentBytes = internalBytes;
+ bytesEmpty = false;
+ }
+ }
+
+ /**
+ *
+ * @return double representation of the timestamp, accurate to nanoseconds
+ */
+ public double getDouble() {
+ double seconds, nanos;
+ if (bytesEmpty) {
+ seconds = TimestampUtils.millisToSeconds(timestamp.getTime());
+ nanos = timestamp.getNanos();
+ } else {
+ seconds = getSeconds();
+ nanos = getNanos();
+ }
+ return seconds + nanos / 1000000000;
+ }
+
+ public static long getLong(Timestamp timestamp) {
+ return timestamp.getTime() / 1000;
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ in.readFully(internalBytes, 0, 4);
+ if (TimestampWritableBase.hasDecimalOrSecondVInt(internalBytes[0])) {
+ in.readFully(internalBytes, 4, 1);
+ int len = (byte) WritableUtils.decodeVIntSize(internalBytes[4]);
+ if (len > 1) {
+ in.readFully(internalBytes, 5, len - 1);
+ }
+
+ int pos = 4 + len;
+ if (hasSecondVInt(internalBytes[4])) {
+ // This indicates there is a second VInt containing the additional bits of the seconds
+ // field.
+ in.readFully(internalBytes, pos, 1);
+ int secondVIntLen = (byte) WritableUtils.decodeVIntSize(internalBytes[pos]);
+ if (secondVIntLen > 1) {
+ in.readFully(internalBytes, pos + 1, secondVIntLen - 1);
+ }
+ pos += secondVIntLen;
+ }
+
+ if (hasTimezoneOffset(internalBytes, 4)) {
+ in.readFully(internalBytes, pos, 1);
+ int tzOffsetLen = WritableUtils.decodeVIntSize(internalBytes[pos]);
+ if (tzOffsetLen > 1) {
+ in.readFully(internalBytes, pos + 1, tzOffsetLen - 1);
+ }
+ }
+ }
+ currentBytes = internalBytes;
+ this.offset = 0;
+ }
+
+ public void write(DataOutput out) throws IOException {
+ checkBytes();
+ out.write(currentBytes, offset, getTotalLength());
+ }
+
+ @Override
+ public int compareTo(TimestampWritableBase t) {
+ checkBytes();
+ long s1 = this.getSeconds();
+ long s2 = t.getSeconds();
+ if (s1 == s2) {
+ int n1 = this.getNanos();
+ int n2 = t.getNanos();
+ if (n1 == n2) {
+ Integer tz1 = getTimezoneOffset();
+ Integer tz2 = t.getTimezoneOffset();
+ if (tz1 == null || tz2 == null) {
+ if (tz1 != null) {
+ return 1;
+ }
+ if (tz2 != null) {
+ return -1;
+ }
+ return 0;
+ }
+ return tz1 - tz2;
+ }
+ return n1 - n2;
+ } else {
+ return s1 < s2 ? -1 : 1;
+ }
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return compareTo((TimestampWritableBase) o) == 0;
+ }
+
+ @Override
+ public String toString() {
+ if (timestampEmpty) {
+ populateTimestamp();
+ }
+
+ String timestampString = timestamp.toString();
+ if (timestampString.length() > 19) {
+ if (timestampString.substring(19, 21).compareTo(".0") == 0) {
+ if (timestampString.length() == 21 || !Character.isDigit(timestampString.charAt(21))) {
+ timestampString = timestampString.substring(0, 19) + timestampString.substring(21);
+ }
+ }
+ }
+
+ return timestampString;
+ }
+
+ @Override
+ public int hashCode() {
+ long seconds = getSeconds();
+ seconds <<= 30; // the nanosecond part fits in 30 bits
+ seconds |= getNanos();
+ Integer tzOffset = getTimezoneOffset();
+ int hash = (int) ((seconds >>> 32) ^ seconds);
+ if (tzOffset != null) {
+ hash ^= tzOffset;
+ }
+ return hash;
+ }
+
+ protected void populateTimestamp() {
+ long seconds = getSeconds();
+ int nanos = getNanos();
+ timestamp.setTime(seconds * 1000);
+ timestamp.setNanos(nanos);
+ Integer tzOffset = getTimezoneOffset();
+ if (timestamp instanceof TimestampTZ) {
+ Preconditions.checkArgument(tzOffset != null);
+ ((TimestampTZ) timestamp).setOffsetInMin(tzOffset);
+ } else {
+ Preconditions.checkArgument(tzOffset == null);
+ }
+ timestampEmpty = false;
+ }
+
+ /** Static methods **/
+
+ /**
+ * Gets seconds stored as integer at bytes[offset]
+ * @param bytes source byte array
+ * @param offset offset of the four-byte seconds field
+ * @return the number of seconds
+ */
+ public static long getSeconds(byte[] bytes, int offset) {
+ int lowest31BitsOfSecondsAndFlag = bytesToInt(bytes, offset);
+ if (lowest31BitsOfSecondsAndFlag >= 0 || // the "has decimal or second VInt" flag is not set
+ !hasSecondVInt(bytes[offset + 4])) {
+ // The entire seconds field is stored in the first 4 bytes.
+ return lowest31BitsOfSecondsAndFlag & LOWEST_31_BITS_OF_SEC_MASK;
+ }
+
+ // We compose the seconds field from two parts. The lowest 31 bits come from the first four
+ // bytes. The higher-order bits come from the second VInt that follows the nanos field.
+ return ((long) (lowest31BitsOfSecondsAndFlag & LOWEST_31_BITS_OF_SEC_MASK)) |
+ (LazyBinaryUtils.readVLongFromByteArray(bytes,
+ offset + 4 + WritableUtils.decodeVIntSize(bytes[offset + 4])) << 31);
+ }
+
+ public static int getNanos(byte[] bytes, int offset) {
+ int val = readVInt(bytes, offset);
+ if (val < 0) {
+ // A negative value means a second VInt (extra seconds bits) follows and the reversed
+ // nanoseconds are still encoded here; restore bit 30, cleared when a timezone is present.
+ val |= TIMEZONE_MASK;
+ val = -val - 1;
+ } else {
+ val &= ~TIMEZONE_MASK;
+ }
+ int len = (int) Math.floor(Math.log10(val)) + 1;
+
+ // Reverse the value
+ int tmp = 0;
+ while (val != 0) {
+ tmp *= 10;
+ tmp += val % 10;
+ val /= 10;
+ }
+ val = tmp;
+
+ if (len < 9) {
+ val *= Math.pow(10, 9 - len);
+ }
+ return val;
+ }
+
+ private static int readVInt(byte[] bytes, int offset) {
+ VInt vInt = LazyBinaryUtils.threadLocalVInt.get();
+ LazyBinaryUtils.readVInt(bytes, offset, vInt);
+ return vInt.value;
+ }
+
+ /**
+ * Writes the Timestamp's serialized value to the internal byte array.
+ */
+ private void populateBytes() {
+ long millis = timestamp.getTime();
+ int nanos = timestamp.getNanos();
+
+ boolean hasTimezone = timestamp instanceof TimestampTZ;
+ long seconds = TimestampUtils.millisToSeconds(millis);
+ boolean hasSecondVInt = seconds < 0 || seconds > Integer.MAX_VALUE;
+ int position = 4;
+ boolean hasDecimal = setNanosBytes(nanos, internalBytes, position, hasSecondVInt, hasTimezone);
+
+ int firstInt = (int) seconds;
+ if (hasDecimal || hasSecondVInt || hasTimezone) {
+ firstInt |= DECIMAL_OR_SECOND_VINT_FLAG;
+ } else {
+ firstInt &= LOWEST_31_BITS_OF_SEC_MASK;
+ }
+ intToBytes(firstInt, internalBytes, 0);
+
+ if (hasSecondVInt) {
+ position += WritableUtils.decodeVIntSize(internalBytes[position]);
+ LazyBinaryUtils.writeVLongToByteArray(internalBytes, position, seconds >> 31);
+ }
+
+ if (hasTimezone) {
+ position += WritableUtils.decodeVIntSize(internalBytes[position]);
+ LazyBinaryUtils.writeVLongToByteArray(internalBytes, position,
+ ((TimestampTZ) timestamp).getOffsetInMin());
+ }
+ }
+
+ /**
+ * Given an integer representing nanoseconds, writes its digit-reversed serialized value to byte array b at offset.
+ * @param nanos nanoseconds value to serialize
+ * @param b destination byte array
+ * @param offset destination offset in the byte array
+ * @param hasSecondVInt whether a second VInt carrying the extra seconds bits follows
+ * @param hasTimezone whether a timezone-offset VInt follows
+ * @return true if the digit-reversed nanoseconds value is non-zero
+ */
+ private static boolean setNanosBytes(int nanos, byte[] b, int offset,
+ boolean hasSecondVInt, boolean hasTimezone) {
+ int decimal = 0;
+ if (nanos != 0) {
+ int counter = 0;
+ while (counter < 9) {
+ decimal *= 10;
+ decimal += nanos % 10;
+ nanos /= 10;
+ counter++;
+ }
+ }
+
+ if (hasSecondVInt || decimal != 0 || hasTimezone) {
+ // We use the sign of the reversed-nanoseconds field to indicate that there is a second VInt
+ // present.
+ int toWrite = decimal;
+ if (hasSecondVInt) {
+ toWrite = -toWrite - 1;
+ }
+ // Decimal ranges in [-1000000000, 999999999]. Use the second MSB to indicate if
+ // timezone is present.
+ // if toWrite >= 0, second MSB is always 0, otherwise it's always 1
+ if (hasTimezone) {
+ if (toWrite >= 0) {
+ toWrite |= TIMEZONE_MASK;
+ } else {
+ toWrite &= ~TIMEZONE_MASK;
+ }
+ }
+ LazyBinaryUtils.writeVLongToByteArray(b, offset, toWrite);
+ }
+ return decimal != 0;
+ }
+
+ public HiveDecimal getHiveDecimal() {
+ if (timestampEmpty) {
+ populateTimestamp();
+ }
+ return getHiveDecimal(timestamp);
+ }
+
+ public static HiveDecimal getHiveDecimal(Timestamp timestamp) {
+ // The BigDecimal class recommends not converting directly from double to BigDecimal,
+ // so we convert through a string...
+ Double timestampDouble = TimestampUtils.getDouble(timestamp);
+ HiveDecimal result = HiveDecimal.create(timestampDouble.toString());
+ return result;
+ }
+
+ public static void setTimestamp(Timestamp t, byte[] bytes, int offset) {
+ long seconds = getSeconds(bytes, offset);
+ t.setTime(seconds * 1000);
+ if (hasDecimalOrSecondVInt(bytes[offset])) {
+ t.setNanos(getNanos(bytes, offset + 4));
+ Integer tzOffset = getTimezoneOffset(bytes, offset + 4);
+ if (t instanceof TimestampTZ) {
+ Preconditions.checkArgument(tzOffset != null);
+ ((TimestampTZ) t).setOffsetInMin(tzOffset);
+ } else {
+ Preconditions.checkArgument(tzOffset == null);
+ }
+ } else {
+ t.setNanos(0);
+ }
+ }
+
+ private static boolean hasDecimalOrSecondVInt(byte b) {
+ return (b >> 7) != 0;
+ }
+
+ private static boolean hasSecondVInt(byte b) {
+ return WritableUtils.isNegativeVInt(b);
+ }
+
+ private final boolean hasDecimalOrSecondVInt() {
+ return hasDecimalOrSecondVInt(currentBytes[offset]);
+ }
+
+ /**
+ * Writes value into dest at offset as a big-endian four-byte integer
+ * @param value the int to write
+ * @param dest destination byte array
+ * @param offset destination offset
+ */
+ protected static void intToBytes(int value, byte[] dest, int offset) {
+ dest[offset] = (byte) ((value >> 24) & 0xFF);
+ dest[offset+1] = (byte) ((value >> 16) & 0xFF);
+ dest[offset+2] = (byte) ((value >> 8) & 0xFF);
+ dest[offset+3] = (byte) (value & 0xFF);
+ }
+
+ /**
+ * Writes value into dest at offset as a seven-byte
+ * serialized long number.
+ */
+ static void sevenByteLongToBytes(long value, byte[] dest, int offset) {
+ dest[offset] = (byte) ((value >> 48) & 0xFF);
+ dest[offset+1] = (byte) ((value >> 40) & 0xFF);
+ dest[offset+2] = (byte) ((value >> 32) & 0xFF);
+ dest[offset+3] = (byte) ((value >> 24) & 0xFF);
+ dest[offset+4] = (byte) ((value >> 16) & 0xFF);
+ dest[offset+5] = (byte) ((value >> 8) & 0xFF);
+ dest[offset+6] = (byte) (value & 0xFF);
+ }
+
+ /**
+ * Reads a big-endian four-byte integer
+ * @param bytes source byte array
+ * @param offset offset of the first byte
+ * @return integer represented by the four bytes in bytes
+ * beginning at offset
+ */
+ protected static int bytesToInt(byte[] bytes, int offset) {
+ return ((0xFF & bytes[offset]) << 24)
+ | ((0xFF & bytes[offset+1]) << 16)
+ | ((0xFF & bytes[offset+2]) << 8)
+ | (0xFF & bytes[offset+3]);
+ }
+
+ static long readSevenByteLong(byte[] bytes, int offset) {
+ // We need to shift everything 8 bits left and then shift back to populate the sign field.
+ return (((0xFFL & bytes[offset]) << 56)
+ | ((0xFFL & bytes[offset+1]) << 48)
+ | ((0xFFL & bytes[offset+2]) << 40)
+ | ((0xFFL & bytes[offset+3]) << 32)
+ | ((0xFFL & bytes[offset+4]) << 24)
+ | ((0xFFL & bytes[offset+5]) << 16)
+ | ((0xFFL & bytes[offset+6]) << 8)) >> 8;
+ }
+}
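
The trickiest part above is the flag packing in setNanosBytes/getNanos: the digit-reversed nanoseconds stay within [-1000000000, 999999999], so bit 30 (TIMEZONE_MASK) is always 0 for non-negative values and 1 for negative ones, and can be borrowed to mark a trailing timezone VInt, while the sign continues to mark the second (extra seconds) VInt. A standalone worked example of the arithmetic:

    final int TIMEZONE_MASK = 1 << 30;
    int decimal = 10;                        // nanos 10_000_000 (0.01s) digit-reversed

    // Encode: no second VInt, timezone present -> set bit 30 on the non-negative value.
    int packed = decimal | TIMEZONE_MASK;    // 1073741834

    // Decode, mirroring hasTimezoneOffset/getNanos.
    boolean hasTz = packed >= 0 ? (packed & TIMEZONE_MASK) != 0
                                : (packed & TIMEZONE_MASK) == 0;
    int restored = packed >= 0 ? packed & ~TIMEZONE_MASK   // 10 again
                               : packed | TIMEZONE_MASK;   // back to the signed form, decoded as before
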
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFactory.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFactory.java
index 23dbe6a..9669525 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFactory.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFactory.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyHiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyHiveIntervalYearMonthObjectInspector;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyHiveIntervalDayTimeObjectInspector;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyTimestampTZObjectInspector;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyHiveVarcharObjectInspector;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyIntObjectInspector;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyLongObjectInspector;
@@ -128,6 +129,8 @@
return new LazyDate((LazyDateObjectInspector) oi);
case TIMESTAMP:
return new LazyTimestamp((LazyTimestampObjectInspector) oi);
+ case TIMESTAMPTZ:
+ return new LazyTimestampTZ((LazyTimestampTZObjectInspector) oi);
case INTERVAL_YEAR_MONTH:
return new LazyHiveIntervalYearMonth((LazyHiveIntervalYearMonthObjectInspector) oi);
case INTERVAL_DAY_TIME:
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyTimestamp.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyTimestamp.java
index 56945d1..d5a9c3b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyTimestamp.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyTimestamp.java
@@ -22,6 +22,7 @@
import java.io.UnsupportedEncodingException;
import java.sql.Timestamp;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
@@ -98,7 +99,7 @@ public static void writeUTF8(OutputStream out, TimestampWritable i)
throws IOException {
if (i == null) {
// Serialize as time 0
- out.write(TimestampWritable.nullBytes);
+ out.write(TimestampWritableBase.nullBytes);
} else {
out.write(i.toString().getBytes("US-ASCII"));
}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyTimestampTZ.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyTimestampTZ.java
new file mode 100644
index 0000000..9f8ce09
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyTimestampTZ.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.lazy;
+
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyTimestampTZObjectInspector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+
+public class LazyTimestampTZ extends
+ LazyPrimitive<LazyTimestampTZObjectInspector, TimestampTZWritable> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(LazyTimestampTZ.class);
+
+ public LazyTimestampTZ(LazyTimestampTZObjectInspector oi) {
+ super(oi);
+ data = new TimestampTZWritable();
+ }
+
+ public LazyTimestampTZ(LazyTimestampTZ copy) {
+ super(copy);
+ data = new TimestampTZWritable(copy.data);
+ }
+
+ @Override
+ public void init(ByteArrayRef bytes, int start, int length) {
+ if (!LazyUtils.isDateMaybe(bytes.getData(), start, length)) {
+ isNull = true;
+ return;
+ }
+
+ String s;
+ TimestampTZ t = null;
+ try {
+ s = new String(bytes.getData(), start, length, "US-ASCII");
+ if (s.equals("NULL")) {
+ isNull = true;
+ logExceptionMessage(bytes, start, length,
+ serdeConstants.TIMESTAMPTZ_TYPE_NAME.toUpperCase());
+ } else {
+ t = TimestampTZ.valueOf(s);
+ }
+ } catch (UnsupportedEncodingException e) {
+ isNull = true;
+ LOG.error("Unsupported encoding found ", e);
+ } catch (IllegalArgumentException e) {
+ isNull = true;
+ logExceptionMessage(bytes, start, length, serdeConstants.TIMESTAMPTZ_TYPE_NAME.toUpperCase());
+ }
+ data.set(t);
+ }
+
+ @Override
+ public TimestampTZWritable getWritableObject() {
+ return data;
+ }
+
+ public static void writeUTF8(OutputStream out, TimestampTZWritable i) throws IOException {
+ if (i == null) {
+ // Serialize as time 0
+ out.write(TimestampWritableBase.nullBytes);
+ } else {
+ out.write(i.toString().getBytes("US-ASCII"));
+ }
+ }
+}
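
End to end, init() parses the printable form via TimestampTZ.valueOf and stores the result in the writable. A short usage sketch, assuming this patch is applied:

    byte[] buf = "2016-01-03 12:26:34.0123 UTC".getBytes(StandardCharsets.US_ASCII);
    ByteArrayRef ref = new ByteArrayRef();
    ref.setData(buf);

    LazyTimestampTZ lazy = new LazyTimestampTZ(new LazyTimestampTZObjectInspector());
    lazy.init(ref, 0, buf.length);
    TimestampTZ t = lazy.getWritableObject().getTimestamp(); // zone parsed as UTC (offset 0)
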
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
index 73c72e1..8a40e93 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
@@ -24,7 +24,6 @@
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.Arrays;
-import java.util.Map;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.hive.serde2.SerDeException;
@@ -41,6 +40,7 @@
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveIntervalDayTimeObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveIntervalYearMonthObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampTZObjectorInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
@@ -229,7 +229,9 @@ public static void writePrimitiveUTF8(OutputStream out, Object o,
PrimitiveObjectInspector oi, boolean escaped, byte escapeChar,
boolean[] needsEscape) throws IOException {
- switch (oi.getPrimitiveCategory()) {
+ PrimitiveObjectInspector.PrimitiveCategory category = oi.getPrimitiveCategory();
+
+ switch (category) {
case BOOLEAN: {
boolean b = ((BooleanObjectInspector) oi).get(o);
if (b) {
@@ -305,6 +307,11 @@ public static void writePrimitiveUTF8(OutputStream out, Object o,
((TimestampObjectInspector) oi).getPrimitiveWritableObject(o));
break;
}
+ case TIMESTAMPTZ: {
+ LazyTimestampTZ.writeUTF8(out,
+ ((TimestampTZObjectorInspector) oi).getPrimitiveWritableObject(o));
+ break;
+ }
case INTERVAL_YEAR_MONTH: {
LazyHiveIntervalYearMonth.writeUTF8(out,
((HiveIntervalYearMonthObjectInspector) oi).getPrimitiveWritableObject(o));
@@ -322,7 +329,7 @@ public static void writePrimitiveUTF8(OutputStream out, Object o,
break;
}
default: {
- throw new RuntimeException("Hive internal error.");
+ throw new RuntimeException("Unknown type: " + category);
}
}
}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
index 5601734..b7faee6 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
@@ -71,6 +71,8 @@
new LazyDateObjectInspector();
public static final LazyTimestampObjectInspector LAZY_TIMESTAMP_OBJECT_INSPECTOR =
new LazyTimestampObjectInspector();
+ public static final LazyTimestampTZObjectInspector LAZY_TIMESTAMPTZ_OBJECT_INSPECTOR =
+ new LazyTimestampTZObjectInspector();
public static final LazyHiveIntervalYearMonthObjectInspector LAZY_INTERVAL_YEAR_MONTH_OBJECT_INSPECTOR =
new LazyHiveIntervalYearMonthObjectInspector();
public static final LazyHiveIntervalDayTimeObjectInspector LAZY_INTERVAL_DAY_TIME_OBJECT_INSPECTOR =
@@ -111,6 +113,8 @@ private LazyPrimitiveObjectInspectorFactory() {
LAZY_DATE_OBJECT_INSPECTOR);
cachedPrimitiveLazyObjectInspectors.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME),
LAZY_TIMESTAMP_OBJECT_INSPECTOR);
+ cachedPrimitiveLazyObjectInspectors.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.TIMESTAMPTZ_TYPE_NAME),
+ LAZY_TIMESTAMPTZ_OBJECT_INSPECTOR);
cachedPrimitiveLazyObjectInspectors.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME),
LAZY_INTERVAL_YEAR_MONTH_OBJECT_INSPECTOR);
cachedPrimitiveLazyObjectInspectors.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME),
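
With the registration above, the inspector becomes reachable by type name through the factory's cache. A lookup sketch, assuming the factory's existing getLazyObjectInspector(PrimitiveTypeInfo) accessor:

    LazyTimestampTZObjectInspector oi = (LazyTimestampTZObjectInspector)
        LazyPrimitiveObjectInspectorFactory.getLazyObjectInspector(
            TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.TIMESTAMPTZ_TYPE_NAME));
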
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyTimestampTZObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyTimestampTZObjectInspector.java
new file mode 100644
index 0000000..a678553
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyTimestampTZObjectInspector.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive;
+
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazyTimestampTZ;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampTZObjectorInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+public class LazyTimestampTZObjectInspector
+ extends AbstractPrimitiveLazyObjectInspector<TimestampTZWritable>
+ implements TimestampTZObjectorInspector {
+
+ public LazyTimestampTZObjectInspector() {
+ super(TypeInfoFactory.timestamptzTypeInfo);
+ }
+
+ @Override
+ public Object copyObject(Object o) {
+ return o == null ? null : new LazyTimestampTZ((LazyTimestampTZ) o);
+ }
+
+ @Override
+ public TimestampTZ getPrimitiveJavaObject(Object o) {
+ return o == null ? null : ((LazyTimestampTZ) o).getWritableObject().getTimestamp();
+ }
+
+}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryFactory.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryFactory.java
index 52f3527..e47cf00 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryFactory.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryFactory.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveCharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveIntervalDayTimeObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveIntervalYearMonthObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampTZObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveVarcharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableHiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableBinaryObjectInspector;
@@ -86,6 +87,8 @@
return new LazyBinaryDate((WritableDateObjectInspector) oi);
case TIMESTAMP:
return new LazyBinaryTimestamp((WritableTimestampObjectInspector) oi);
+ case TIMESTAMPTZ:
+ return new LazyBinaryTimestampTZ((WritableTimestampTZObjectInspector) oi);
case INTERVAL_YEAR_MONTH:
return new LazyBinaryHiveIntervalYearMonth((WritableHiveIntervalYearMonthObjectInspector) oi);
case INTERVAL_DAY_TIME:
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
index 54bfd2d..bb90560 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
@@ -24,6 +24,8 @@
import java.util.Map;
import java.util.Properties;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampTZObjectorInspector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -460,6 +462,12 @@ public static void serialize(RandomAccessOutput byteStream, Object obj,
t.writeToByteStream(byteStream);
return;
}
+ case TIMESTAMPTZ: {
+ TimestampTZWritable t = ((TimestampTZObjectorInspector) poi).
+ getPrimitiveWritableObject(obj);
+ t.writeToByteStream(byteStream);
+ return;
+ }
case INTERVAL_YEAR_MONTH: {
HiveIntervalYearMonthWritable intervalYearMonth =
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryTimestampTZ.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryTimestampTZ.java
new file mode 100644
index 0000000..7110a80
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryTimestampTZ.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.lazybinary;
+
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampTZObjectInspector;
+
+/**
+ * A LazyBinaryObject that encodes TimestampTZ
+ */
+public class LazyBinaryTimestampTZ extends
+ LazyBinaryPrimitive<WritableTimestampTZObjectInspector, TimestampTZWritable> {
+
+ public LazyBinaryTimestampTZ(WritableTimestampTZObjectInspector oi) {
+ super(oi);
+ data = new TimestampTZWritable();
+ }
+
+ @Override
+ public void init(ByteArrayRef bytes, int start, int length) {
+ data.set(bytes.getData(), start);
+ }
+}
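
Unlike the text-lazy path, init() here simply points the writable at the compact VInt-encoded bytes; the total length is recomputed from the flag bits on access. A round-trip sketch, assuming this patch is applied (the inspector instance would normally come from the object-inspector factory; WritableTimestampTZObjectInspector is added elsewhere in this patch):

    ByteStream.Output out = new ByteStream.Output();
    new TimestampTZWritable(TimestampTZ.valueOf("2016-01-03 12:26:34 UTC")).writeToByteStream(out);

    LazyBinaryTimestampTZ lb = new LazyBinaryTimestampTZ(inspector); // a WritableTimestampTZObjectInspector
    ByteArrayRef ref = new ByteArrayRef();
    ref.setData(out.getData());
    lb.init(ref, 0, out.getLength());
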
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
index f8a110d..897fafa 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hive.serde2.ByteStream.RandomAccessOutput;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.lazybinary.objectinspector.LazyBinaryObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
@@ -201,8 +202,9 @@ public static void checkObjectByteInfo(ObjectInspector objectInspector,
recordInfo.elementSize = WritableUtils.decodeVIntSize(bytes[offset]);
break;
case TIMESTAMP:
+ case TIMESTAMPTZ:
recordInfo.elementOffset = 0;
- recordInfo.elementSize = TimestampWritable.getTotalLength(bytes, offset);
+ recordInfo.elementSize = TimestampWritableBase.getTotalLength(bytes, offset);
break;
case INTERVAL_YEAR_MONTH:
recordInfo.elementOffset = 0;
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
index 24b3d4e..a0f726c 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableHiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableHiveIntervalDayTimeObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableHiveIntervalYearMonthObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableTimestampTZObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableHiveVarcharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableIntObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableLongObjectInspector;
@@ -42,7 +43,6 @@
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableTimestampObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.VoidObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
/**
* ObjectInspectorConverters.
@@ -123,6 +123,9 @@ private static Converter getConverter(PrimitiveObjectInspector inputOI,
return new PrimitiveObjectInspectorConverter.TimestampConverter(
inputOI,
(SettableTimestampObjectInspector) outputOI);
+ case TIMESTAMPTZ:
+ return new PrimitiveObjectInspectorConverter.TimestampTZConverter(
+ inputOI, (SettableTimestampTZObjectInspector) outputOI);
case INTERVAL_YEAR_MONTH:
return new PrimitiveObjectInspectorConverter.HiveIntervalYearMonthConverter(
inputOI,
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
index 1ac72c6..95f812b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
@@ -27,6 +27,9 @@
import java.util.List;
import java.util.Map;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampTZObjectorInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableTimestampTZObjectInspector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.serde.serdeConstants;
@@ -412,6 +415,10 @@ public static Object copyToStandardObject(
result = loi.getPrimitiveJavaObject(o);
if (loi.getPrimitiveCategory() == PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP) {
result = PrimitiveObjectInspectorFactory.javaTimestampObjectInspector.copyObject(result);
+ } else if (loi.getPrimitiveCategory() ==
+ PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMPTZ) {
+ result = PrimitiveObjectInspectorFactory.javaTimestampTZObjectInspector.
+ copyObject(result);
}
break;
case WRITABLE:
@@ -689,6 +696,10 @@ public static int hashCode(Object o, ObjectInspector objIns) {
TimestampWritable t = ((TimestampObjectInspector) poi)
.getPrimitiveWritableObject(o);
return t.hashCode();
+ case TIMESTAMPTZ:
+ TimestampTZWritable ht = ((TimestampTZObjectorInspector) poi)
+ .getPrimitiveWritableObject(o);
+ return ht.hashCode();
case INTERVAL_YEAR_MONTH:
HiveIntervalYearMonthWritable intervalYearMonth = ((HiveIntervalYearMonthObjectInspector) poi)
.getPrimitiveWritableObject(o);
@@ -948,6 +959,13 @@ public static int compare(Object o1, ObjectInspector oi1, Object o2,
.getPrimitiveWritableObject(o2);
return t1.compareTo(t2);
}
+ case TIMESTAMPTZ: {
+ TimestampTZWritable hts1 = ((TimestampTZObjectorInspector) poi1).
+ getPrimitiveWritableObject(o1);
+ TimestampTZWritable hts2 = ((TimestampTZObjectorInspector) poi2).
+ getPrimitiveWritableObject(o2);
+ return hts1.compareTo(hts2);
+ }
case INTERVAL_YEAR_MONTH: {
HiveIntervalYearMonthWritable i1 = ((HiveIntervalYearMonthObjectInspector) poi1)
.getPrimitiveWritableObject(o1);
@@ -1315,6 +1333,8 @@ private static boolean isInstanceOfSettablePrimitiveOI(PrimitiveObjectInspector
return oi instanceof SettableDateObjectInspector;
case TIMESTAMP:
return oi instanceof SettableTimestampObjectInspector;
+ case TIMESTAMPTZ:
+ return oi instanceof SettableTimestampTZObjectInspector;
case INTERVAL_YEAR_MONTH:
return oi instanceof SettableHiveIntervalYearMonthObjectInspector;
case INTERVAL_DAY_TIME:
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java
index 70633f3..e2c15ff 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java
@@ -31,8 +31,8 @@
*/
public static enum PrimitiveCategory {
VOID, BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING,
- DATE, TIMESTAMP, BINARY, DECIMAL, VARCHAR, CHAR, INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME,
- UNKNOWN
+ DATE, TIMESTAMP, TIMESTAMPTZ, BINARY, DECIMAL, VARCHAR, CHAR, INTERVAL_YEAR_MONTH,
+ INTERVAL_DAY_TIME, UNKNOWN
};
public PrimitiveTypeInfo getTypeInfo();
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java
index 509189e..c49531e 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java
@@ -20,6 +20,7 @@
import java.sql.Timestamp;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
public class JavaTimestampObjectInspector
@@ -63,7 +64,7 @@ public Object set(Object o, Timestamp value) {
}
public Object set(Object o, byte[] bytes, int offset) {
- TimestampWritable.setTimestamp((Timestamp) o, bytes, offset);
+ TimestampWritableBase.setTimestamp((Timestamp) o, bytes, offset);
return o;
}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampTZObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampTZObjectInspector.java
new file mode 100644
index 0000000..80fdfb5
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampTZObjectInspector.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.objectinspector.primitive;
+
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableBase;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+import java.sql.Timestamp;
+
+public class JavaTimestampTZObjectInspector
+ extends AbstractPrimitiveJavaObjectInspector implements SettableTimestampTZObjectInspector {
+
+ protected JavaTimestampTZObjectInspector() {
+ super(TypeInfoFactory.timestamptzTypeInfo);
+ }
+
+ @Override
+ public Object set(Object o, byte[] bytes, int offset) {
+ TimestampWritableBase.setTimestamp((Timestamp) o, bytes, offset);
+ return o;
+ }
+
+ @Override
+ public Object set(Object o, TimestampTZ t) {
+ if (t == null) {
+ return null;
+ }
+ TimestampTZ tstz = (TimestampTZ) o;
+ tstz.setTime(t.getTime());
+ tstz.setNanos(t.getNanos());
+ tstz.setOffsetInMin(t.getOffsetInMin());
+ return tstz;
+ }
+
+ @Override
+ public Object set(Object o, TimestampTZWritable t) {
+ if (t == null) {
+ return null;
+ }
+ TimestampTZ tstz = (TimestampTZ) o;
+ TimestampTZ source = t.getTimestamp();
+ tstz.setTime(source.getTime());
+ tstz.setNanos(source.getNanos());
+ tstz.setOffsetInMin(source.getOffsetInMin());
+ return tstz;
+ }
+
+ @Override
+ public Object create(byte[] bytes, int offset) {
+ return TimestampTZWritable.createTimestampTZ(bytes, offset);
+ }
+
+ @Override
+ public Object create(TimestampTZ t) {
+ return copyObject(t);
+ }
+
+ @Override
+ public TimestampTZWritable getPrimitiveWritableObject(Object o) {
+ return o == null ? null : new TimestampTZWritable((TimestampTZ) o);
+ }
+
+ @Override
+ public TimestampTZ getPrimitiveJavaObject(Object o) {
+ return o == null ? null : (TimestampTZ) o;
+ }
+
+ @Override
+ public Object copyObject(Object o) {
+ if (o == null) {
+ return null;
+ }
+ TimestampTZ source = (TimestampTZ) o;
+ TimestampTZ copy = new TimestampTZ(source.getTime(), source.getOffsetInMin());
+ copy.setNanos(source.getNanos());
+ return copy;
+ }
+}
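
The java-side inspector mutates a caller-supplied TimestampTZ rather than allocating, so set() copies time, nanos, and offset into the target. A small usage sketch with made-up values:

    JavaTimestampTZObjectInspector oi =
        PrimitiveObjectInspectorFactory.javaTimestampTZObjectInspector;
    TimestampTZ target = new TimestampTZ(0L, 0);
    oi.set(target, new TimestampTZ(1234L, 330));             // target: 1.234s epoch, GMT+05:30
    TimestampTZ copy = (TimestampTZ) oi.copyObject(target);  // independent deep copy
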
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
index e08ad43..83a341f 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
@@ -25,10 +25,9 @@
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.common.type.HiveVarchar;
import org.apache.hadoop.hive.serde2.ByteStream;
-import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
-import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
import org.apache.hadoop.hive.serde2.lazy.LazyLong;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
@@ -292,6 +291,27 @@ public Object convert(Object input) {
}
}
+ public static class TimestampTZConverter implements Converter {
+ final PrimitiveObjectInspector inputOI;
+ final SettableTimestampTZObjectInspector outputOI;
+ final Object r;
+
+ public TimestampTZConverter(PrimitiveObjectInspector inputOI,
+ SettableTimestampTZObjectInspector outputOI) {
+ this.inputOI = inputOI;
+ this.outputOI = outputOI;
+ r = outputOI.create(new TimestampTZ(0, 0));
+ }
+
+ @Override
+ public Object convert(Object input) {
+ if (input == null) {
+ return null;
+ }
+ return outputOI.set(r, PrimitiveObjectInspectorUtils.getTimestampTZ(input, inputOI));
+ }
+ }
+
public static class HiveIntervalYearMonthConverter implements Converter {
PrimitiveObjectInspector inputOI;
SettableHiveIntervalYearMonthObjectInspector outputOI;
@@ -466,6 +486,10 @@ public Text convert(Object input) {
t.set(((TimestampObjectInspector) inputOI)
.getPrimitiveWritableObject(input).toString());
return t;
+ case TIMESTAMPTZ:
+ t.set(((TimestampTZObjectorInspector) inputOI)
+ .getPrimitiveWritableObject(input).toString());
+ return t;
case INTERVAL_YEAR_MONTH:
t.set(((HiveIntervalYearMonthObjectInspector) inputOI)
.getPrimitiveWritableObject(input).toString());
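
As with the other primitive converters, TimestampTZConverter allocates its output object once in the constructor and overwrites it on every convert() call. A hedged sketch of a string-to-timestamptz conversion, assuming TimestampUtils.getTimestampTZOrNull accepts zone-suffixed strings of this shape:

    Converter c = new PrimitiveObjectInspectorConverter.TimestampTZConverter(
        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
        PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector);
    Object w1 = c.convert("2016-01-03 12:26:34.0123 UTC");
    Object w2 = c.convert("2005-06-03 02:01:00 Europe/London");  // same object as w1, overwritten
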
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
index 2ed0843..f005e57 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.io.HiveIntervalDayTimeWritable;
import org.apache.hadoop.hive.serde2.io.HiveIntervalYearMonthWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
import org.apache.hadoop.hive.serde2.io.ShortWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
@@ -87,6 +88,8 @@
new WritableDateObjectInspector();
public static final WritableTimestampObjectInspector writableTimestampObjectInspector =
new WritableTimestampObjectInspector();
+ public static final WritableTimestampTZObjectInspector writableTimestampTZObjectInspector =
+ new WritableTimestampTZObjectInspector();
public static final WritableHiveIntervalYearMonthObjectInspector writableHiveIntervalYearMonthObjectInspector =
new WritableHiveIntervalYearMonthObjectInspector();
public static final WritableHiveIntervalDayTimeObjectInspector writableHiveIntervalDayTimeObjectInspector =
@@ -124,6 +127,8 @@
writableDateObjectInspector);
cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME),
writableTimestampObjectInspector);
+ cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.TIMESTAMPTZ_TYPE_NAME),
+ writableTimestampTZObjectInspector);
cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME),
writableHiveIntervalYearMonthObjectInspector);
cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME),
@@ -149,6 +154,7 @@
primitiveCategoryToWritableOI.put(PrimitiveCategory.VOID, writableVoidObjectInspector);
primitiveCategoryToWritableOI.put(PrimitiveCategory.DATE, writableDateObjectInspector);
primitiveCategoryToWritableOI.put(PrimitiveCategory.TIMESTAMP, writableTimestampObjectInspector);
+ primitiveCategoryToWritableOI.put(PrimitiveCategory.TIMESTAMPTZ, writableTimestampTZObjectInspector);
primitiveCategoryToWritableOI.put(PrimitiveCategory.INTERVAL_YEAR_MONTH, writableHiveIntervalYearMonthObjectInspector);
primitiveCategoryToWritableOI.put(PrimitiveCategory.INTERVAL_DAY_TIME, writableHiveIntervalDayTimeObjectInspector);
primitiveCategoryToWritableOI.put(PrimitiveCategory.BINARY, writableBinaryObjectInspector);
@@ -181,6 +187,8 @@
new JavaDateObjectInspector();
public static final JavaTimestampObjectInspector javaTimestampObjectInspector =
new JavaTimestampObjectInspector();
+ public static final JavaTimestampTZObjectInspector javaTimestampTZObjectInspector =
+ new JavaTimestampTZObjectInspector();
public static final JavaHiveIntervalYearMonthObjectInspector javaHiveIntervalYearMonthObjectInspector =
new JavaHiveIntervalYearMonthObjectInspector();
public static final JavaHiveIntervalDayTimeObjectInspector javaHiveIntervalDayTimeObjectInspector =
@@ -218,6 +226,8 @@
javaDateObjectInspector);
cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME),
javaTimestampObjectInspector);
+ cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.timestamptzTypeInfo,
+ javaTimestampTZObjectInspector);
cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME),
javaHiveIntervalYearMonthObjectInspector);
cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME),
@@ -243,6 +253,7 @@
primitiveCategoryToJavaOI.put(PrimitiveCategory.VOID, javaVoidObjectInspector);
primitiveCategoryToJavaOI.put(PrimitiveCategory.DATE, javaDateObjectInspector);
primitiveCategoryToJavaOI.put(PrimitiveCategory.TIMESTAMP, javaTimestampObjectInspector);
+ primitiveCategoryToJavaOI.put(PrimitiveCategory.TIMESTAMPTZ, javaTimestampTZObjectInspector);
primitiveCategoryToJavaOI.put(PrimitiveCategory.INTERVAL_YEAR_MONTH, javaHiveIntervalYearMonthObjectInspector);
primitiveCategoryToJavaOI.put(PrimitiveCategory.INTERVAL_DAY_TIME, javaHiveIntervalDayTimeObjectInspector);
primitiveCategoryToJavaOI.put(PrimitiveCategory.BINARY, javaByteArrayObjectInspector);
@@ -336,6 +347,8 @@ public static ConstantObjectInspector getPrimitiveWritableConstantObjectInspecto
return new WritableConstantDateObjectInspector((DateWritable)value);
case TIMESTAMP:
return new WritableConstantTimestampObjectInspector((TimestampWritable)value);
+ case TIMESTAMPTZ:
+ return new WritableConstantTimestampTZObjectInspector((TimestampTZWritable) value);
case INTERVAL_YEAR_MONTH:
return new WritableConstantHiveIntervalYearMonthObjectInspector((HiveIntervalYearMonthWritable) value);
case INTERVAL_DAY_TIME:
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
index 51b529e..546db0b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
@@ -27,7 +27,9 @@
import java.util.HashMap;
import java.util.Map;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.ql.util.TimestampUtils;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.common.type.HiveChar;
@@ -226,6 +228,9 @@ static void registerType(PrimitiveTypeEntry t) {
public static final PrimitiveTypeEntry timestampTypeEntry = new PrimitiveTypeEntry(
PrimitiveCategory.TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME, null,
Timestamp.class, TimestampWritable.class);
+ public static final PrimitiveTypeEntry timestampTZTypeEntry = new PrimitiveTypeEntry(
+ PrimitiveCategory.TIMESTAMPTZ, serdeConstants.TIMESTAMPTZ_TYPE_NAME, null,
+ TimestampTZ.class, TimestampTZWritable.class);
public static final PrimitiveTypeEntry intervalYearMonthTypeEntry = new PrimitiveTypeEntry(
PrimitiveCategory.INTERVAL_YEAR_MONTH, serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME, null,
HiveIntervalYearMonth.class, HiveIntervalYearMonthWritable.class);
@@ -261,6 +266,7 @@ static void registerType(PrimitiveTypeEntry t) {
registerType(shortTypeEntry);
registerType(dateTypeEntry);
registerType(timestampTypeEntry);
+ registerType(timestampTZTypeEntry);
registerType(intervalYearMonthTypeEntry);
registerType(intervalDayTimeTypeEntry);
registerType(decimalTypeEntry);
@@ -439,6 +445,10 @@ public static boolean comparePrimitiveObjects(Object o1,
return ((TimestampObjectInspector) oi1).getPrimitiveWritableObject(o1)
.equals(((TimestampObjectInspector) oi2).getPrimitiveWritableObject(o2));
}
+ case TIMESTAMPTZ: {
+ return ((TimestampTZObjectorInspector) oi1).getPrimitiveWritableObject(o1).equals(
+ ((TimestampTZObjectorInspector) oi2).getPrimitiveWritableObject(o2));
+ }
case INTERVAL_YEAR_MONTH: {
return ((HiveIntervalYearMonthObjectInspector) oi1).getPrimitiveWritableObject(o1)
.equals(((HiveIntervalYearMonthObjectInspector) oi2).getPrimitiveWritableObject(o2));
@@ -461,39 +471,6 @@ public static boolean comparePrimitiveObjects(Object o1,
}
/**
- * Convert a primitive object to double.
- */
- public static double convertPrimitiveToDouble(Object o, PrimitiveObjectInspector oi) {
- switch (oi.getPrimitiveCategory()) {
- case BOOLEAN:
- return ((BooleanObjectInspector) oi).get(o) ? 1 : 0;
- case BYTE:
- return ((ByteObjectInspector) oi).get(o);
- case SHORT:
- return ((ShortObjectInspector) oi).get(o);
- case INT:
- return ((IntObjectInspector) oi).get(o);
- case LONG:
- return ((LongObjectInspector) oi).get(o);
- case FLOAT:
- return ((FloatObjectInspector) oi).get(o);
- case DOUBLE:
- return ((DoubleObjectInspector) oi).get(o);
- case STRING:
- return Double.valueOf(((StringObjectInspector) oi).getPrimitiveJavaObject(o));
- case TIMESTAMP:
- return ((TimestampObjectInspector) oi).getPrimitiveWritableObject(o)
- .getDouble();
- case DECIMAL:
- // TODO: lossy conversion!
- return ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o).doubleValue();
- case DATE: // unsupported conversion
- default:
- throw new NumberFormatException();
- }
- }
-
- /**
* Compare 2 Primitive Objects with their Object Inspector, conversions
* allowed. Note that NULL does not equal to NULL according to SQL standard.
*/
@@ -509,8 +486,7 @@ public static boolean comparePrimitiveObjectsWithConversion(Object o1,
// If not equal, convert all to double and compare
try {
- return convertPrimitiveToDouble(o1, oi1) == convertPrimitiveToDouble(o2,
- oi2);
+ return getDouble(o1, oi1) == getDouble(o2, oi2);
} catch (NumberFormatException e) {
return false;
}
@@ -562,6 +538,10 @@ public static boolean getBoolean(Object o, PrimitiveObjectInspector oi) {
result = (((TimestampObjectInspector) oi)
.getPrimitiveWritableObject(o).getSeconds() != 0);
break;
+ case TIMESTAMPTZ:
+ result = (((TimestampTZObjectorInspector) oi)
+ .getPrimitiveWritableObject(o).getSeconds() != 0);
+ break;
case DECIMAL:
result = HiveDecimal.ZERO.compareTo(
((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o)) != 0;
@@ -652,6 +632,10 @@ public static int getInt(Object o, PrimitiveObjectInspector oi) {
result = (int) (((TimestampObjectInspector) oi)
.getPrimitiveWritableObject(o).getSeconds());
break;
+ case TIMESTAMPTZ:
+ result = (int) ((TimestampTZObjectorInspector) oi)
+ .getPrimitiveWritableObject(o).getSeconds();
+ break;
case DECIMAL:
result = ((HiveDecimalObjectInspector) oi)
.getPrimitiveJavaObject(o).intValue(); // TODO: lossy conversion!
@@ -716,6 +700,10 @@ public static long getLong(Object o, PrimitiveObjectInspector oi) {
result = ((TimestampObjectInspector) oi).getPrimitiveWritableObject(o)
.getSeconds();
break;
+ case TIMESTAMPTZ:
+ result = ((TimestampTZObjectorInspector) oi).getPrimitiveWritableObject(o)
+ .getSeconds();
+ break;
case DECIMAL:
result = ((HiveDecimalObjectInspector) oi)
.getPrimitiveJavaObject(o).longValue(); // TODO: lossy conversion!
@@ -772,6 +760,9 @@ public static double getDouble(Object o, PrimitiveObjectInspector oi) {
case TIMESTAMP:
result = ((TimestampObjectInspector) oi).getPrimitiveWritableObject(o).getDouble();
break;
+ case TIMESTAMPTZ:
+ result = ((TimestampTZObjectorInspector) oi).getPrimitiveWritableObject(o).getDouble();
+ break;
case DECIMAL:
result = ((HiveDecimalObjectInspector) oi)
.getPrimitiveJavaObject(o).doubleValue();
@@ -858,6 +849,9 @@ public static String getString(Object o, PrimitiveObjectInspector oi) {
case TIMESTAMP:
result = ((TimestampObjectInspector) oi).getPrimitiveWritableObject(o).toString();
break;
+ case TIMESTAMPTZ:
+ result = ((TimestampTZObjectorInspector) oi).getPrimitiveWritableObject(o).toString();
+ break;
case INTERVAL_YEAR_MONTH:
result = ((HiveIntervalYearMonthObjectInspector) oi).getPrimitiveWritableObject(o).toString();
break;
@@ -999,6 +993,10 @@ public static HiveDecimal getHiveDecimal(Object o, PrimitiveObjectInspector oi)
.getDouble();
result = HiveDecimal.create(ts.toString());
break;
+ case TIMESTAMPTZ:
+ Double tstz = ((TimestampTZObjectorInspector) oi).getPrimitiveWritableObject(o).getDouble();
+ result = HiveDecimal.create(tstz.toString());
+ break;
case DECIMAL:
result = ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o);
break;
@@ -1010,6 +1008,32 @@ public static HiveDecimal getHiveDecimal(Object o, PrimitiveObjectInspector oi)
return result;
}
+ public static TimestampTZ getTimestampTZ(Object o, PrimitiveObjectInspector oi) {
+ if (o == null) {
+ return null;
+ }
+
+ switch (oi.getPrimitiveCategory()) {
+ case STRING: {
+ StringObjectInspector soi = (StringObjectInspector) oi;
+ String s = soi.getPrimitiveJavaObject(o).trim();
+ return TimestampUtils.getTimestampTZOrNull(s);
+ }
+ case CHAR:
+ case VARCHAR: {
+ String s = getString(o, oi).trim();
+ return TimestampUtils.getTimestampTZOrNull(s);
+ }
+ case TIMESTAMPTZ: {
+ return ((TimestampTZObjectorInspector) oi).getPrimitiveWritableObject(o).getTimestamp();
+ }
+ default: {
+ throw new RuntimeException("Cannot convert to " + serdeConstants.TIMESTAMPTZ_TYPE_NAME +
+ " from: " + oi.getTypeName());
+ }
+ }
+ }
+
public static Date getDate(Object o, PrimitiveObjectInspector oi) {
if (o == null) {
return null;
@@ -1046,6 +1070,10 @@ public static Date getDate(Object o, PrimitiveObjectInspector oi) {
result = DateWritable.timeToDate(
((TimestampObjectInspector) oi).getPrimitiveWritableObject(o).getSeconds());
break;
+ case TIMESTAMPTZ:
+ result = DateWritable.timeToDate(
+ ((TimestampTZObjectorInspector) oi).getPrimitiveWritableObject(o).getSeconds());
+ break;
default:
throw new RuntimeException("Cannot convert to Date from: "
+ oi.getTypeName());
@@ -1115,6 +1143,12 @@ public static Timestamp getTimestamp(Object o, PrimitiveObjectInspector inputOI,
case TIMESTAMP:
result = ((TimestampObjectInspector) inputOI).getPrimitiveWritableObject(o).getTimestamp();
break;
+ case TIMESTAMPTZ:
+ TimestampTZ tstz = ((TimestampTZObjectorInspector) inputOI).
+ getPrimitiveWritableObject(o).getTimestamp();
+ result = new Timestamp(tstz.getTime());
+        result.setNanos(tstz.getNanos());
+        break;
default:
throw new RuntimeException("Hive 2 Internal error: unknown type: "
+ inputOI.getTypeName());
@@ -1249,6 +1282,7 @@ public static PrimitiveGrouping getPrimitiveGrouping(PrimitiveCategory primitive
return PrimitiveGrouping.BOOLEAN_GROUP;
case TIMESTAMP:
case DATE:
+ case TIMESTAMPTZ:
return PrimitiveGrouping.DATE_GROUP;
case INTERVAL_YEAR_MONTH:
case INTERVAL_DAY_TIME:
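
Note that the getTimestampTZ introduced above only converts from the string family (STRING, CHAR, VARCHAR) and from TIMESTAMPTZ itself; every other category throws. A minimal sketch (the zone-suffixed string is an assumption; the exact formats accepted are whatever TimestampUtils.getTimestampTZOrNull parses):

    TimestampTZ t = PrimitiveObjectInspectorUtils.getTimestampTZ(
        "2005-04-03 02:01:00 America/Los_Angeles",
        PrimitiveObjectInspectorFactory.javaStringObjectInspector);
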
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/SettableTimestampTZObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/SettableTimestampTZObjectInspector.java
new file mode 100644
index 0000000..93db81e
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/SettableTimestampTZObjectInspector.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.objectinspector.primitive;
+
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+
+public interface SettableTimestampTZObjectInspector extends TimestampTZObjectorInspector {
+
+ Object set(Object o, byte[] bytes, int offset);
+
+ Object set(Object o, TimestampTZ t);
+
+ Object set(Object o, TimestampTZWritable t);
+
+ Object create(byte[] bytes, int offset);
+
+ Object create(TimestampTZ t);
+}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/TimestampTZObjectorInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/TimestampTZObjectorInspector.java
new file mode 100644
index 0000000..d27b183
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/TimestampTZObjectorInspector.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.objectinspector.primitive;
+
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+
+/**
+ * For TimestampTZ.
+ */
+public interface TimestampTZObjectorInspector extends PrimitiveObjectInspector {
+
+ TimestampTZWritable getPrimitiveWritableObject(Object o);
+
+ TimestampTZ getPrimitiveJavaObject(Object o);
+
+}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantTimestampTZObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantTimestampTZObjectInspector.java
new file mode 100644
index 0000000..9a2119c
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantTimestampTZObjectInspector.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.objectinspector.primitive;
+
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
+
+public class WritableConstantTimestampTZObjectInspector extends
+ WritableTimestampTZObjectInspector implements ConstantObjectInspector {
+
+ private TimestampTZWritable value;
+
+ public WritableConstantTimestampTZObjectInspector(TimestampTZWritable value) {
+ this.value = value;
+ }
+
+ @Override
+ public TimestampTZWritable getWritableConstantValue() {
+ return value;
+ }
+}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampTZObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampTZObjectInspector.java
new file mode 100644
index 0000000..644e464
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampTZObjectInspector.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.serde2.objectinspector.primitive;
+
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.serde2.io.TimestampTZWritable;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+public class WritableTimestampTZObjectInspector extends
+ AbstractPrimitiveWritableObjectInspector implements
+ SettableTimestampTZObjectInspector {
+
+ public WritableTimestampTZObjectInspector() {
+ super(TypeInfoFactory.timestamptzTypeInfo);
+ }
+
+ @Override
+ public Object set(Object o, byte[] bytes, int offset) {
+ ((TimestampTZWritable) o).set(bytes, offset);
+ return o;
+ }
+
+ @Override
+ public Object set(Object o, TimestampTZ t) {
+ if (t == null) {
+ return null;
+ }
+ ((TimestampTZWritable) o).set(t);
+ return o;
+ }
+
+ @Override
+ public Object set(Object o, TimestampTZWritable t) {
+ if (t == null) {
+ return null;
+ }
+ ((TimestampTZWritable) o).set(t);
+ return o;
+ }
+
+ @Override
+ public Object create(byte[] bytes, int offset) {
+ return new TimestampTZWritable(bytes, offset);
+ }
+
+ @Override
+ public Object create(TimestampTZ t) {
+ return new TimestampTZWritable(t);
+ }
+
+ @Override
+ public TimestampTZ getPrimitiveJavaObject(Object o) {
+ return o == null ? null : ((TimestampTZWritable) o).getTimestamp();
+ }
+
+ @Override
+ public Object copyObject(Object o) {
+ return o == null ? null : new TimestampTZWritable((TimestampTZWritable) o);
+ }
+
+ @Override
+ public TimestampTZWritable getPrimitiveWritableObject(Object o) {
+ return o == null ? null : (TimestampTZWritable) o;
+ }
+}
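
In the writable inspector the inspected object is the TimestampTZWritable itself: set() overwrites it in place, create() allocates, and copyObject() deep-copies. A short sketch:

    WritableTimestampTZObjectInspector oi =
        PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector;
    Object holder = oi.create(new TimestampTZ(0L, 0));   // a fresh TimestampTZWritable
    oi.set(holder, new TimestampTZ(1000L, -8 * 60));     // overwrite: 1s epoch, GMT-08:00
    TimestampTZ javaValue = oi.getPrimitiveJavaObject(holder);
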
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java
index 0ad8c02..761cf9a 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java
@@ -21,6 +21,7 @@
import java.sql.DatabaseMetaData;
import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hive.service.rpc.thrift.TTypeId;
@@ -70,6 +71,9 @@
TIMESTAMP_TYPE("TIMESTAMP",
java.sql.Types.TIMESTAMP,
TTypeId.TIMESTAMP_TYPE),
+ TIMESTAMPTZ_TYPE(serdeConstants.TIMESTAMPTZ_TYPE_NAME.toUpperCase(),
+ java.sql.Types.OTHER,
+ TTypeId.TIMESTAMPTZ_TYPE),
INTERVAL_YEAR_MONTH_TYPE("INTERVAL_YEAR_MONTH",
java.sql.Types.OTHER,
TTypeId.INTERVAL_YEAR_MONTH_TYPE),
@@ -225,6 +229,9 @@ public static Type getType(TypeInfo typeInfo) {
case TIMESTAMP: {
return Type.TIMESTAMP_TYPE;
}
+ case TIMESTAMPTZ: {
+ return Type.TIMESTAMPTZ_TYPE;
+ }
case INTERVAL_YEAR_MONTH: {
return Type.INTERVAL_YEAR_MONTH_TYPE;
}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
index 43c4819..4982ac8 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
@@ -55,6 +55,7 @@ private TypeInfoFactory() {
public static final PrimitiveTypeInfo shortTypeInfo = new PrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME);
public static final PrimitiveTypeInfo dateTypeInfo = new PrimitiveTypeInfo(serdeConstants.DATE_TYPE_NAME);
public static final PrimitiveTypeInfo timestampTypeInfo = new PrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME);
+ public static final PrimitiveTypeInfo timestamptzTypeInfo = new PrimitiveTypeInfo(serdeConstants.TIMESTAMPTZ_TYPE_NAME);
public static final PrimitiveTypeInfo intervalYearMonthTypeInfo = new PrimitiveTypeInfo(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME);
public static final PrimitiveTypeInfo intervalDayTimeTypeInfo = new PrimitiveTypeInfo(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME);
public static final PrimitiveTypeInfo binaryTypeInfo = new PrimitiveTypeInfo(serdeConstants.BINARY_TYPE_NAME);
@@ -85,6 +86,7 @@ private TypeInfoFactory() {
cachedPrimitiveTypeInfo.put(serdeConstants.SMALLINT_TYPE_NAME, shortTypeInfo);
cachedPrimitiveTypeInfo.put(serdeConstants.DATE_TYPE_NAME, dateTypeInfo);
cachedPrimitiveTypeInfo.put(serdeConstants.TIMESTAMP_TYPE_NAME, timestampTypeInfo);
+ cachedPrimitiveTypeInfo.put(serdeConstants.TIMESTAMPTZ_TYPE_NAME, timestamptzTypeInfo);
cachedPrimitiveTypeInfo.put(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME, intervalYearMonthTypeInfo);
cachedPrimitiveTypeInfo.put(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME, intervalDayTimeTypeInfo);
cachedPrimitiveTypeInfo.put(serdeConstants.BINARY_TYPE_NAME, binaryTypeInfo);
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java
index 3c483cc..d7494cd 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java
@@ -17,9 +17,6 @@
*/
package org.apache.hadoop.hive.serde2.io;
-import com.google.code.tempusfugit.concurrency.annotations.*;
-import com.google.code.tempusfugit.concurrency.*;
-
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
@@ -35,13 +32,21 @@
import java.util.Random;
import java.util.TimeZone;
+import com.google.code.tempusfugit.concurrency.ConcurrentRule;
+import com.google.code.tempusfugit.concurrency.RepeatingRule;
+import com.google.code.tempusfugit.concurrency.annotations.Concurrent;
+import com.google.code.tempusfugit.concurrency.annotations.Repeating;
import org.apache.hadoop.hive.ql.util.TimestampUtils;
-import org.junit.*;
-import static org.junit.Assert.*;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
public class TestTimestampWritable {
@@ -494,6 +499,22 @@ public void testBinarySortable() {
}
@Test
+ public void test2ndMSBOfDecimal() {
+    // The decimal part currently lies in [-1000000000, 999999999], so its second most
+    // significant bit is free to indicate whether a timezone offset exists.
+ int decimal = -1000000000;
+ final int mask = 1 << 30;
+ while (decimal < 0) {
+ assertTrue((decimal & mask) != 0);
+ decimal++;
+ }
+ while (decimal <= 999999999) {
+ assertTrue((decimal & mask) == 0);
+ decimal++;
+ }
+ }
+
+ @Test
public void testSetTimestamp() {
// one VInt without nanos
verifySetTimestamp(1000);
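
test2ndMSBOfDecimal pins down the bit trick the timezone encoding relies on: every legal decimal value fits in 30 bits of magnitude, so in two's complement bit 30 is set for exactly the negative half of the range and is otherwise free to flag the presence of an offset. The invariant in miniature:

    final int mask = 1 << 30;            // 2^30 = 1073741824 > 999999999
    assert (-1000000000 & mask) != 0;    // negatives in range always have bit 30 set
    assert (999999999 & mask) == 0;      // non-negatives in range always leave it clear
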
diff --git a/service-rpc/if/TCLIService.thrift b/service-rpc/if/TCLIService.thrift
index a4fa7b0..613d4b2 100644
--- a/service-rpc/if/TCLIService.thrift
+++ b/service-rpc/if/TCLIService.thrift
@@ -63,6 +63,9 @@ enum TProtocolVersion {
// V9 adds support for serializing ResultSets in SerDe
HIVE_CLI_SERVICE_PROTOCOL_V9
+
+ // V10 adds timestamptz type
+ HIVE_CLI_SERVICE_PROTOCOL_V10
}
enum TTypeId {
@@ -87,7 +90,8 @@ enum TTypeId {
VARCHAR_TYPE,
CHAR_TYPE,
INTERVAL_YEAR_MONTH_TYPE,
- INTERVAL_DAY_TIME_TYPE
+ INTERVAL_DAY_TIME_TYPE,
+ TIMESTAMPTZ_TYPE
}
const set<TTypeId> PRIMITIVE_TYPES = [
@@ -107,7 +111,8 @@ const set PRIMITIVE_TYPES = [
TTypeId.VARCHAR_TYPE,
TTypeId.CHAR_TYPE,
TTypeId.INTERVAL_YEAR_MONTH_TYPE,
- TTypeId.INTERVAL_DAY_TIME_TYPE
+ TTypeId.INTERVAL_DAY_TIME_TYPE,
+ TTypeId.TIMESTAMPTZ_TYPE
]
const set<TTypeId> COMPLEX_TYPES = [
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_constants.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_constants.cpp
index 991cb2e..201ec0d 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_constants.cpp
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_constants.cpp
@@ -28,6 +28,7 @@ TCLIServiceConstants::TCLIServiceConstants() {
PRIMITIVE_TYPES.insert((TTypeId::type)19);
PRIMITIVE_TYPES.insert((TTypeId::type)20);
PRIMITIVE_TYPES.insert((TTypeId::type)21);
+ PRIMITIVE_TYPES.insert((TTypeId::type)22);
COMPLEX_TYPES.insert((TTypeId::type)10);
COMPLEX_TYPES.insert((TTypeId::type)11);
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
index 2f460e8..aaf9fc5 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
@@ -22,7 +22,8 @@ int _kTProtocolVersionValues[] = {
TProtocolVersion::HIVE_CLI_SERVICE_PROTOCOL_V6,
TProtocolVersion::HIVE_CLI_SERVICE_PROTOCOL_V7,
TProtocolVersion::HIVE_CLI_SERVICE_PROTOCOL_V8,
- TProtocolVersion::HIVE_CLI_SERVICE_PROTOCOL_V9
+ TProtocolVersion::HIVE_CLI_SERVICE_PROTOCOL_V9,
+ TProtocolVersion::HIVE_CLI_SERVICE_PROTOCOL_V10
};
const char* _kTProtocolVersionNames[] = {
"HIVE_CLI_SERVICE_PROTOCOL_V1",
@@ -33,9 +34,10 @@ const char* _kTProtocolVersionNames[] = {
"HIVE_CLI_SERVICE_PROTOCOL_V6",
"HIVE_CLI_SERVICE_PROTOCOL_V7",
"HIVE_CLI_SERVICE_PROTOCOL_V8",
- "HIVE_CLI_SERVICE_PROTOCOL_V9"
+ "HIVE_CLI_SERVICE_PROTOCOL_V9",
+ "HIVE_CLI_SERVICE_PROTOCOL_V10"
};
-const std::map<int, const char*> _TProtocolVersion_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(9, _kTProtocolVersionValues, _kTProtocolVersionNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+const std::map<int, const char*> _TProtocolVersion_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(10, _kTProtocolVersionValues, _kTProtocolVersionNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kTTypeIdValues[] = {
TTypeId::BOOLEAN_TYPE,
@@ -59,7 +61,8 @@ int _kTTypeIdValues[] = {
TTypeId::VARCHAR_TYPE,
TTypeId::CHAR_TYPE,
TTypeId::INTERVAL_YEAR_MONTH_TYPE,
- TTypeId::INTERVAL_DAY_TIME_TYPE
+ TTypeId::INTERVAL_DAY_TIME_TYPE,
+ TTypeId::TIMESTAMPTZ_TYPE
};
const char* _kTTypeIdNames[] = {
"BOOLEAN_TYPE",
@@ -83,9 +86,10 @@ const char* _kTTypeIdNames[] = {
"VARCHAR_TYPE",
"CHAR_TYPE",
"INTERVAL_YEAR_MONTH_TYPE",
- "INTERVAL_DAY_TIME_TYPE"
+ "INTERVAL_DAY_TIME_TYPE",
+ "TIMESTAMPTZ_TYPE"
};
-const std::map<int, const char*> _TTypeId_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(22, _kTTypeIdValues, _kTTypeIdNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+const std::map<int, const char*> _TTypeId_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(23, _kTTypeIdValues, _kTTypeIdNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kTStatusCodeValues[] = {
TStatusCode::SUCCESS_STATUS,
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
index b249544..5604799 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
@@ -29,7 +29,8 @@ struct TProtocolVersion {
HIVE_CLI_SERVICE_PROTOCOL_V6 = 5,
HIVE_CLI_SERVICE_PROTOCOL_V7 = 6,
HIVE_CLI_SERVICE_PROTOCOL_V8 = 7,
- HIVE_CLI_SERVICE_PROTOCOL_V9 = 8
+ HIVE_CLI_SERVICE_PROTOCOL_V9 = 8,
+ HIVE_CLI_SERVICE_PROTOCOL_V10 = 9
};
};
@@ -58,7 +59,8 @@ struct TTypeId {
VARCHAR_TYPE = 18,
CHAR_TYPE = 19,
INTERVAL_YEAR_MONTH_TYPE = 20,
- INTERVAL_DAY_TIME_TYPE = 21
+ INTERVAL_DAY_TIME_TYPE = 21,
+ TIMESTAMPTZ_TYPE = 22
};
};
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIServiceConstants.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIServiceConstants.java
index 930bed7..762d46a 100644
--- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIServiceConstants.java
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIServiceConstants.java
@@ -55,6 +55,7 @@
PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.CHAR_TYPE);
PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.INTERVAL_YEAR_MONTH_TYPE);
PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.INTERVAL_DAY_TIME_TYPE);
+ PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.TIMESTAMPTZ_TYPE);
}
public static final Set<TTypeId> COMPLEX_TYPES = new HashSet<TTypeId>();
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TProtocolVersion.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TProtocolVersion.java
index bce2a0c..18a7825 100644
--- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TProtocolVersion.java
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TProtocolVersion.java
@@ -20,7 +20,8 @@
HIVE_CLI_SERVICE_PROTOCOL_V6(5),
HIVE_CLI_SERVICE_PROTOCOL_V7(6),
HIVE_CLI_SERVICE_PROTOCOL_V8(7),
- HIVE_CLI_SERVICE_PROTOCOL_V9(8);
+ HIVE_CLI_SERVICE_PROTOCOL_V9(8),
+ HIVE_CLI_SERVICE_PROTOCOL_V10(9);
private final int value;
@@ -59,6 +60,8 @@ public static TProtocolVersion findByValue(int value) {
return HIVE_CLI_SERVICE_PROTOCOL_V8;
case 8:
return HIVE_CLI_SERVICE_PROTOCOL_V9;
+ case 9:
+ return HIVE_CLI_SERVICE_PROTOCOL_V10;
default:
return null;
}
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TTypeId.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TTypeId.java
index a3735eb..1b062b7 100644
--- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TTypeId.java
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TTypeId.java
@@ -33,7 +33,8 @@
VARCHAR_TYPE(18),
CHAR_TYPE(19),
INTERVAL_YEAR_MONTH_TYPE(20),
- INTERVAL_DAY_TIME_TYPE(21);
+ INTERVAL_DAY_TIME_TYPE(21),
+ TIMESTAMPTZ_TYPE(22);
private final int value;
@@ -98,6 +99,8 @@ public static TTypeId findByValue(int value) {
return INTERVAL_YEAR_MONTH_TYPE;
case 21:
return INTERVAL_DAY_TIME_TYPE;
+ case 22:
+ return TIMESTAMPTZ_TYPE;
default:
return null;
}
diff --git a/service-rpc/src/gen/thrift/gen-php/Types.php b/service-rpc/src/gen/thrift/gen-php/Types.php
index 786c773..4445092 100644
--- a/service-rpc/src/gen/thrift/gen-php/Types.php
+++ b/service-rpc/src/gen/thrift/gen-php/Types.php
@@ -25,6 +25,7 @@ final class TProtocolVersion {
const HIVE_CLI_SERVICE_PROTOCOL_V7 = 6;
const HIVE_CLI_SERVICE_PROTOCOL_V8 = 7;
const HIVE_CLI_SERVICE_PROTOCOL_V9 = 8;
+ const HIVE_CLI_SERVICE_PROTOCOL_V10 = 9;
static public $__names = array(
0 => 'HIVE_CLI_SERVICE_PROTOCOL_V1',
1 => 'HIVE_CLI_SERVICE_PROTOCOL_V2',
@@ -35,6 +36,7 @@ final class TProtocolVersion {
6 => 'HIVE_CLI_SERVICE_PROTOCOL_V7',
7 => 'HIVE_CLI_SERVICE_PROTOCOL_V8',
8 => 'HIVE_CLI_SERVICE_PROTOCOL_V9',
+ 9 => 'HIVE_CLI_SERVICE_PROTOCOL_V10',
);
}
@@ -61,6 +63,7 @@ final class TTypeId {
const CHAR_TYPE = 19;
const INTERVAL_YEAR_MONTH_TYPE = 20;
const INTERVAL_DAY_TIME_TYPE = 21;
+ const TIMESTAMPTZ_TYPE = 22;
static public $__names = array(
0 => 'BOOLEAN_TYPE',
1 => 'TINYINT_TYPE',
@@ -84,6 +87,7 @@ final class TTypeId {
19 => 'CHAR_TYPE',
20 => 'INTERVAL_YEAR_MONTH_TYPE',
21 => 'INTERVAL_DAY_TIME_TYPE',
+ 22 => 'TIMESTAMPTZ_TYPE',
);
}
@@ -9724,6 +9728,7 @@ final class Constant extends \Thrift\Type\TConstant {
19 => true,
20 => true,
21 => true,
+ 22 => true,
);
}
diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/constants.py b/service-rpc/src/gen/thrift/gen-py/TCLIService/constants.py
index c8d4f8f..e002291 100644
--- a/service-rpc/src/gen/thrift/gen-py/TCLIService/constants.py
+++ b/service-rpc/src/gen/thrift/gen-py/TCLIService/constants.py
@@ -27,6 +27,7 @@
19,
20,
21,
+ 22,
])
COMPLEX_TYPES = set([
10,
diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
index fdf6b1f..d07ce10 100644
--- a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
+++ b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
@@ -26,6 +26,7 @@ class TProtocolVersion:
HIVE_CLI_SERVICE_PROTOCOL_V7 = 6
HIVE_CLI_SERVICE_PROTOCOL_V8 = 7
HIVE_CLI_SERVICE_PROTOCOL_V9 = 8
+ HIVE_CLI_SERVICE_PROTOCOL_V10 = 9
_VALUES_TO_NAMES = {
0: "HIVE_CLI_SERVICE_PROTOCOL_V1",
@@ -37,6 +38,7 @@ class TProtocolVersion:
6: "HIVE_CLI_SERVICE_PROTOCOL_V7",
7: "HIVE_CLI_SERVICE_PROTOCOL_V8",
8: "HIVE_CLI_SERVICE_PROTOCOL_V9",
+ 9: "HIVE_CLI_SERVICE_PROTOCOL_V10",
}
_NAMES_TO_VALUES = {
@@ -49,6 +51,7 @@ class TProtocolVersion:
"HIVE_CLI_SERVICE_PROTOCOL_V7": 6,
"HIVE_CLI_SERVICE_PROTOCOL_V8": 7,
"HIVE_CLI_SERVICE_PROTOCOL_V9": 8,
+ "HIVE_CLI_SERVICE_PROTOCOL_V10": 9,
}
class TTypeId:
@@ -74,6 +77,7 @@ class TTypeId:
CHAR_TYPE = 19
INTERVAL_YEAR_MONTH_TYPE = 20
INTERVAL_DAY_TIME_TYPE = 21
+ TIMESTAMPTZ_TYPE = 22
_VALUES_TO_NAMES = {
0: "BOOLEAN_TYPE",
@@ -98,6 +102,7 @@ class TTypeId:
19: "CHAR_TYPE",
20: "INTERVAL_YEAR_MONTH_TYPE",
21: "INTERVAL_DAY_TIME_TYPE",
+ 22: "TIMESTAMPTZ_TYPE",
}
_NAMES_TO_VALUES = {
@@ -123,6 +128,7 @@ class TTypeId:
"CHAR_TYPE": 19,
"INTERVAL_YEAR_MONTH_TYPE": 20,
"INTERVAL_DAY_TIME_TYPE": 21,
+ "TIMESTAMPTZ_TYPE": 22,
}
class TStatusCode:
diff --git a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_constants.rb b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_constants.rb
index 25adbb4..b7bbebc 100644
--- a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_constants.rb
+++ b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_constants.rb
@@ -25,6 +25,7 @@ PRIMITIVE_TYPES = Set.new([
19,
20,
21,
+ 22,
])
COMPLEX_TYPES = Set.new([
diff --git a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
index 4b1854c..80bc8ba 100644
--- a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
+++ b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
@@ -16,8 +16,9 @@ module TProtocolVersion
HIVE_CLI_SERVICE_PROTOCOL_V7 = 6
HIVE_CLI_SERVICE_PROTOCOL_V8 = 7
HIVE_CLI_SERVICE_PROTOCOL_V9 = 8
- VALUE_MAP = {0 => "HIVE_CLI_SERVICE_PROTOCOL_V1", 1 => "HIVE_CLI_SERVICE_PROTOCOL_V2", 2 => "HIVE_CLI_SERVICE_PROTOCOL_V3", 3 => "HIVE_CLI_SERVICE_PROTOCOL_V4", 4 => "HIVE_CLI_SERVICE_PROTOCOL_V5", 5 => "HIVE_CLI_SERVICE_PROTOCOL_V6", 6 => "HIVE_CLI_SERVICE_PROTOCOL_V7", 7 => "HIVE_CLI_SERVICE_PROTOCOL_V8", 8 => "HIVE_CLI_SERVICE_PROTOCOL_V9"}
- VALID_VALUES = Set.new([HIVE_CLI_SERVICE_PROTOCOL_V1, HIVE_CLI_SERVICE_PROTOCOL_V2, HIVE_CLI_SERVICE_PROTOCOL_V3, HIVE_CLI_SERVICE_PROTOCOL_V4, HIVE_CLI_SERVICE_PROTOCOL_V5, HIVE_CLI_SERVICE_PROTOCOL_V6, HIVE_CLI_SERVICE_PROTOCOL_V7, HIVE_CLI_SERVICE_PROTOCOL_V8, HIVE_CLI_SERVICE_PROTOCOL_V9]).freeze
+ HIVE_CLI_SERVICE_PROTOCOL_V10 = 9
+ VALUE_MAP = {0 => "HIVE_CLI_SERVICE_PROTOCOL_V1", 1 => "HIVE_CLI_SERVICE_PROTOCOL_V2", 2 => "HIVE_CLI_SERVICE_PROTOCOL_V3", 3 => "HIVE_CLI_SERVICE_PROTOCOL_V4", 4 => "HIVE_CLI_SERVICE_PROTOCOL_V5", 5 => "HIVE_CLI_SERVICE_PROTOCOL_V6", 6 => "HIVE_CLI_SERVICE_PROTOCOL_V7", 7 => "HIVE_CLI_SERVICE_PROTOCOL_V8", 8 => "HIVE_CLI_SERVICE_PROTOCOL_V9", 9 => "HIVE_CLI_SERVICE_PROTOCOL_V10"}
+ VALID_VALUES = Set.new([HIVE_CLI_SERVICE_PROTOCOL_V1, HIVE_CLI_SERVICE_PROTOCOL_V2, HIVE_CLI_SERVICE_PROTOCOL_V3, HIVE_CLI_SERVICE_PROTOCOL_V4, HIVE_CLI_SERVICE_PROTOCOL_V5, HIVE_CLI_SERVICE_PROTOCOL_V6, HIVE_CLI_SERVICE_PROTOCOL_V7, HIVE_CLI_SERVICE_PROTOCOL_V8, HIVE_CLI_SERVICE_PROTOCOL_V9, HIVE_CLI_SERVICE_PROTOCOL_V10]).freeze
end
module TTypeId
@@ -43,8 +44,9 @@ module TTypeId
CHAR_TYPE = 19
INTERVAL_YEAR_MONTH_TYPE = 20
INTERVAL_DAY_TIME_TYPE = 21
- VALUE_MAP = {0 => "BOOLEAN_TYPE", 1 => "TINYINT_TYPE", 2 => "SMALLINT_TYPE", 3 => "INT_TYPE", 4 => "BIGINT_TYPE", 5 => "FLOAT_TYPE", 6 => "DOUBLE_TYPE", 7 => "STRING_TYPE", 8 => "TIMESTAMP_TYPE", 9 => "BINARY_TYPE", 10 => "ARRAY_TYPE", 11 => "MAP_TYPE", 12 => "STRUCT_TYPE", 13 => "UNION_TYPE", 14 => "USER_DEFINED_TYPE", 15 => "DECIMAL_TYPE", 16 => "NULL_TYPE", 17 => "DATE_TYPE", 18 => "VARCHAR_TYPE", 19 => "CHAR_TYPE", 20 => "INTERVAL_YEAR_MONTH_TYPE", 21 => "INTERVAL_DAY_TIME_TYPE"}
- VALID_VALUES = Set.new([BOOLEAN_TYPE, TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE, FLOAT_TYPE, DOUBLE_TYPE, STRING_TYPE, TIMESTAMP_TYPE, BINARY_TYPE, ARRAY_TYPE, MAP_TYPE, STRUCT_TYPE, UNION_TYPE, USER_DEFINED_TYPE, DECIMAL_TYPE, NULL_TYPE, DATE_TYPE, VARCHAR_TYPE, CHAR_TYPE, INTERVAL_YEAR_MONTH_TYPE, INTERVAL_DAY_TIME_TYPE]).freeze
+ TIMESTAMPTZ_TYPE = 22
+ VALUE_MAP = {0 => "BOOLEAN_TYPE", 1 => "TINYINT_TYPE", 2 => "SMALLINT_TYPE", 3 => "INT_TYPE", 4 => "BIGINT_TYPE", 5 => "FLOAT_TYPE", 6 => "DOUBLE_TYPE", 7 => "STRING_TYPE", 8 => "TIMESTAMP_TYPE", 9 => "BINARY_TYPE", 10 => "ARRAY_TYPE", 11 => "MAP_TYPE", 12 => "STRUCT_TYPE", 13 => "UNION_TYPE", 14 => "USER_DEFINED_TYPE", 15 => "DECIMAL_TYPE", 16 => "NULL_TYPE", 17 => "DATE_TYPE", 18 => "VARCHAR_TYPE", 19 => "CHAR_TYPE", 20 => "INTERVAL_YEAR_MONTH_TYPE", 21 => "INTERVAL_DAY_TIME_TYPE", 22 => "TIMESTAMPTZ_TYPE"}
+ VALID_VALUES = Set.new([BOOLEAN_TYPE, TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE, FLOAT_TYPE, DOUBLE_TYPE, STRING_TYPE, TIMESTAMP_TYPE, BINARY_TYPE, ARRAY_TYPE, MAP_TYPE, STRUCT_TYPE, UNION_TYPE, USER_DEFINED_TYPE, DECIMAL_TYPE, NULL_TYPE, DATE_TYPE, VARCHAR_TYPE, CHAR_TYPE, INTERVAL_YEAR_MONTH_TYPE, INTERVAL_DAY_TIME_TYPE, TIMESTAMPTZ_TYPE]).freeze
end
module TStatusCode
diff --git a/service/src/java/org/apache/hive/service/cli/ColumnValue.java b/service/src/java/org/apache/hive/service/cli/ColumnValue.java
index 76e8c03..7195f87 100644
--- a/service/src/java/org/apache/hive/service/cli/ColumnValue.java
+++ b/service/src/java/org/apache/hive/service/cli/ColumnValue.java
@@ -191,6 +191,7 @@ public static TColumnValue toTColumnValue(TypeDescriptor typeDescriptor, Object
case DATE_TYPE:
return dateValue((Date)value);
case TIMESTAMP_TYPE:
+ case TIMESTAMPTZ_TYPE:
return timestampValue((Timestamp)value);
case INTERVAL_YEAR_MONTH_TYPE:
return stringValue((HiveIntervalYearMonth) value);
diff --git a/service/src/java/org/apache/hive/service/cli/TypeDescriptor.java b/service/src/java/org/apache/hive/service/cli/TypeDescriptor.java
index d634bef..ad61fc3 100644
--- a/service/src/java/org/apache/hive/service/cli/TypeDescriptor.java
+++ b/service/src/java/org/apache/hive/service/cli/TypeDescriptor.java
@@ -116,6 +116,8 @@ public Integer getColumnSize() {
return 10;
case TIMESTAMP_TYPE:
return 29;
+ case TIMESTAMPTZ_TYPE:
+ return 39;
default:
return null;
}
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/TimestampTZ.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/TimestampTZ.java
new file mode 100644
index 0000000..7eef512
--- /dev/null
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/TimestampTZ.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common.type;
+
+import org.apache.commons.math3.util.Pair;
+
+import java.sql.Timestamp;
+import java.text.DateFormat;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.TimeZone;
+
+/**
+ * A subclass of java.sql.Timestamp that carries a time zone offset.
+ * Any timestamp that must be interpreted in a specific time zone should use this type.
+ */
+public class TimestampTZ extends Timestamp {
+ private static final ThreadLocal<DateFormat> threadLocalDateFormat =
+ new ThreadLocal<DateFormat>() {
+ @Override
+ protected DateFormat initialValue() {
+ return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ }
+ };
+
+ // The offset from UTC, stored in minutes, in the range [-12:00, +14:00].
+ private int offsetInMin;
+
+ private transient String internalID = null;
+
+ private static final int MAX_OFFSET = 840;
+ private static final int MIN_OFFSET = -720;
+
+ // Sentinel outside the valid offset range; indicates no offset is present
+ public static final int NULL_OFFSET = -800;
+
+ public TimestampTZ(long time, String timezoneID) {
+ super(time);
+ offsetInMin = computeOffset(timezoneID);
+ }
+
+ public TimestampTZ(long time, int offsetInMin) {
+ super(time);
+ this.offsetInMin = validateOffset(offsetInMin);
+ }
+
+ private int computeOffset(String timezoneID) {
+ validateTimezoneID(timezoneID);
+ TimeZone tz = TimeZone.getTimeZone(timezoneID);
+ return tz.getOffset(getTime()) / 1000 / 60;
+ }
+
+ public int getOffsetInMin() {
+ return offsetInMin;
+ }
+
+ public void setOffsetInMin(int offsetInMin) {
+ this.offsetInMin = validateOffset(offsetInMin);
+ internalID = null;
+ }
+
+ private String getTimezoneID() {
+ if (internalID == null) {
+ StringBuilder builder = new StringBuilder("GMT");
+ if (offsetInMin != 0) {
+ if (offsetInMin > 0) {
+ builder.append("+");
+ } else {
+ builder.append("-");
+ }
+ int tmp = offsetInMin > 0 ? offsetInMin : -offsetInMin;
+ int offsetHour = tmp / 60;
+ int offsetMin = tmp % 60;
+ builder.append(String.format("%02d", offsetHour)).append(":").
+ append(String.format("%02d", offsetMin));
+ }
+ internalID = builder.toString();
+ }
+ return internalID;
+ }
+
+ private static void validateTimezoneID(String timezoneID) {
+ if (timezoneID == null) {
+ throw new IllegalArgumentException("Timezone ID is null");
+ }
+ TimeZone tz = TimeZone.getTimeZone(timezoneID);
+ // We may end up with GMT in case of invalid timezoneID
+ if (tz.getID().equals("GMT") && !tz.getID().equals(timezoneID)) {
+ throw new IllegalArgumentException("Unknown timezoneID: " + timezoneID);
+ }
+ }
+
+ @Override
+ public String toString() {
+ String ts = super.toString();
+ DateFormat dateFormat = threadLocalDateFormat.get();
+ TimeZone defaultTZ = dateFormat.getTimeZone();
+ try {
+ String timezoneID = getTimezoneID();
+ dateFormat.setTimeZone(TimeZone.getTimeZone(timezoneID));
+ String r = dateFormat.format(this) + ts.substring(19);
+ r += " " + timezoneID;
+ return r;
+ } finally {
+ dateFormat.setTimeZone(defaultTZ);
+ }
+ }
+
+ @Override
+ public int compareTo(Timestamp ts) {
+ int result = super.compareTo(ts);
+ if (result == 0) {
+ if (ts instanceof TimestampTZ) {
+ result = offsetInMin - ((TimestampTZ) ts).offsetInMin;
+ } else {
+ result = 1;
+ }
+ }
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof Timestamp) {
+ return compareTo((Timestamp) o) == 0;
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode() ^ offsetInMin;
+ }
+
+ public static TimestampTZ valueOf(String timestamp) {
+ Pair<String, String> pair = extractTimezoneID(timestamp);
+ return valueOf(pair.getFirst(), pair.getSecond());
+ }
+
+ public static Timestamp parseWithFallback(String timestamp) {
+ Pair<String, String> pair = extractTimezoneID(timestamp);
+ if (pair.getSecond() == null) {
+ return Timestamp.valueOf(timestamp);
+ }
+ return valueOf(pair.getFirst(), pair.getSecond());
+ }
+
+ public static TimestampTZ valueOf(String timestamp, String timezoneID) {
+ Timestamp ts = Timestamp.valueOf(timestamp);
+ validateTimezoneID(timezoneID);
+ DateFormat dateFormat = threadLocalDateFormat.get();
+ TimeZone defaultTZ = dateFormat.getTimeZone();
+ try {
+ int nanos = ts.getNanos();
+ dateFormat.setTimeZone(TimeZone.getTimeZone(timezoneID));
+ Date date = dateFormat.parse(timestamp);
+ TimestampTZ timestampTZ = new TimestampTZ(date.getTime(), timezoneID);
+ timestampTZ.setNanos(nanos);
+ return timestampTZ;
+ } catch (ParseException e) {
+ throw new IllegalArgumentException(e);
+ } finally {
+ dateFormat.setTimeZone(defaultTZ);
+ }
+ }
+
+ // Split s into the timestamp part and, if present, a trailing timezone ID.
+ private static Pair<String, String> extractTimezoneID(String s) {
+ s = s.trim();
+ int divide = s.indexOf(' ');
+ if (divide != -1) {
+ divide = s.indexOf(' ', divide + 1);
+ if (divide != -1) {
+ return new Pair<>(s.substring(0, divide), s.substring(divide + 1));
+ }
+ }
+ return new Pair<>(s, null);
+ }
+
+ public static boolean isValidOffset(int offsetInMin) {
+ return offsetInMin >= MIN_OFFSET && offsetInMin <= MAX_OFFSET;
+ }
+
+ private static int validateOffset(int offsetInMin) {
+ if (!isValidOffset(offsetInMin) && offsetInMin != NULL_OFFSET) {
+ throw new IllegalArgumentException("Timezone offset out of range: " + offsetInMin);
+ }
+ return offsetInMin;
+ }
+}
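
To make the parsing rules above concrete, here is a small usage sketch, not part of the patch, derived from valueOf() and toString(): the single-argument valueOf() treats anything after the second space as a zone ID, and toString() always normalizes the zone to a GMT offset:

    // Sketch of the new API, assuming the semantics implemented above.
    TimestampTZ t1 = TimestampTZ.valueOf("2021-07-05 08:15:30.123 UTC");
    System.out.println(t1);  // "2021-07-05 08:15:30.123 GMT" -- zone normalized

    TimestampTZ t2 = TimestampTZ.valueOf("2021-07-05 08:15:30.123", "Asia/Kolkata");
    System.out.println(t2);  // "2021-07-05 08:15:30.123 GMT+05:30"

    // parseWithFallback() degrades to a plain Timestamp when no zone ID is present.
    java.sql.Timestamp t3 = TimestampTZ.parseWithFallback("2021-07-05 08:15:30.123");
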
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/util/JavaDataModel.java b/storage-api/src/java/org/apache/hadoop/hive/ql/util/JavaDataModel.java
index 4a745e4..168b7fa 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/util/JavaDataModel.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/util/JavaDataModel.java
@@ -307,6 +307,9 @@ public int lengthForBooleanArrayOfSize(int length) {
public int lengthForTimestampArrayOfSize(int length) {
return lengthForPrimitiveArrayOfSize(lengthOfTimestamp(), length);
}
+ public int lengthForTimestampTZArrayOfSize(int length) {
+ return lengthForPrimitiveArrayOfSize(lengthOfTimestampTZ(), length);
+ }
public int lengthForDateArrayOfSize(int length) {
return lengthForPrimitiveArrayOfSize(lengthOfDate(), length);
}
@@ -334,6 +337,10 @@ public int lengthOfTimestamp() {
return object() + primitive2();
}
+ public int lengthOfTimestampTZ() {
+ return lengthOfTimestamp() + primitive2();
+ }
+
public int lengthOfDate() {
// object overhead + 8 bytes for long (fastTime) + 16 bytes for cdate
return object() + 3 * primitive2();
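
On the accounting above: lengthOfTimestampTZ() charges one extra primitive2() on top of a plain Timestamp, which appears to cover the added int offset field plus the cached timezone-ID reference under the model's alignment rules. A usage sketch, assuming JavaDataModel.get() returns the model for the running JVM as elsewhere in this class:

    // Estimating heap usage of TimestampTZ values with the new helpers.
    JavaDataModel model = JavaDataModel.get();
    int perValue = model.lengthOfTimestampTZ();                  // a single value
    int perArray = model.lengthForTimestampTZArrayOfSize(1024);  // a 1024-slot array
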
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java b/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
index 41db9ca..dfe7530 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
@@ -19,6 +19,9 @@
package org.apache.hadoop.hive.ql.util;
import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.sql.Timestamp;
@@ -27,6 +30,7 @@
* Utitilities for Timestamps and the relevant conversions.
*/
public class TimestampUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(TimestampUtils.class);
public static final BigDecimal BILLION_BIG_DECIMAL = BigDecimal.valueOf(1000000000);
/**
@@ -99,4 +103,15 @@ public static long millisToSeconds(long millis) {
return (millis - 999) / 1000;
}
}
+
+ public static TimestampTZ getTimestampTZOrNull(String s) {
+ TimestampTZ result;
+ try {
+ result = TimestampTZ.valueOf(s);
+ } catch (IllegalArgumentException e) {
+ LOG.debug("Invalid string " + s + " for TIMESTAMP WITH TIME ZONE", e);
+ result = null;
+ }
+ return result;
+ }
}
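
Finally, the contract of the new helper: callers probing whether a string is a valid TIMESTAMP WITH TIME ZONE literal get null instead of an exception, with the failure logged at debug level. A short sketch, not part of the patch:

    TimestampTZ ok  = TimestampUtils.getTimestampTZOrNull("2021-07-05 08:15:30 GMT+05:30");
    TimestampTZ bad = TimestampUtils.getTimestampTZOrNull("not a timestamp");  // returns null
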