diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 20501cf75e..aa0ea379e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -1512,7 +1512,9 @@ public boolean supportsCharSet() {
                 .getTypeFactory());
         final JdbcImplementor.Result result = jdbcImplementor.visitChild(0, optimizedOptiqPlan);
         String sql = result.asStatement().toSqlString(dialect).getSql();
-        return sql.replaceAll("VARCHAR\\(2147483647\\)", "STRING");
+        return sql
+            .replaceAll("VARCHAR\\(2147483647\\)", "STRING") // VARCHAR(INTEGER.MAX) -> STRING
+            .replaceAll("TIMESTAMP\\(9\\)", "TIMESTAMP"); // TIMESTAMP(9) -> TIMESTAMP
       } catch (Exception ex) {
         LOG.warn("Rel2SQL Rewrite threw error", ex);
       }
diff --git a/ql/src/test/queries/clientpositive/timestamp.q b/ql/src/test/queries/clientpositive/timestamp.q
index 1453cf87ff..f3f09b32fc 100644
--- a/ql/src/test/queries/clientpositive/timestamp.q
+++ b/ql/src/test/queries/clientpositive/timestamp.q
@@ -4,6 +4,8 @@ set hive.mapred.mode=nonstrict;
 
 explain select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5;
+explain extended select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5;
+
 
 select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5;
 
 explain select cast('2011-01-01 01:01:01.123' as timestamp) as c from src union select cast('2011-01-01 01:01:01.123' as timestamp) as c from src limit 5;
diff --git a/ql/src/test/results/clientpositive/timestamp.q.out b/ql/src/test/results/clientpositive/timestamp.q.out
index c59b5181fc..aaa9998788 100644
--- a/ql/src/test/results/clientpositive/timestamp.q.out
+++ b/ql/src/test/results/clientpositive/timestamp.q.out
@@ -83,6 +83,178 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: explain extended select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: explain extended select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+OPTIMIZED SQL: SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP) AS `c`
+FROM (SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP) AS `$f0`
+FROM `default`.`src`
+UNION ALL
+SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP) AS `$f0`
+FROM `default`.`src`) AS `t1`
+GROUP BY TRUE
+LIMIT 5
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            GatherStats: false
+            Select Operator
+              Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+              Union
+                Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: true (type: boolean)
+                    minReductionHashAggr: 0.99
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: boolean)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: boolean)
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      tag: -1
+                      TopN: 5
+                      TopN Hash Memory Usage: 0.1
+                      auto parallelism: false
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            GatherStats: false
+            Select Operator
+              Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+              Union
+                Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: true (type: boolean)
+                    minReductionHashAggr: 0.99
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: boolean)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: boolean)
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      tag: -1
+                      TopN: 5
+                      TopN Hash Memory Usage: 0.1
+                      auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              bucketing_version 2
+              column.name.delimiter ,
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                bucketing_version 2
+                column.name.delimiter ,
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [$hdt$_0:$hdt$_0-subquery1:src, $hdt$_0:$hdt$_0-subquery2:src]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: boolean)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: TIMESTAMP'2011-01-01 01:01:01' (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+            Limit
+              Number of rows: 5
+              Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns _col0
+                      columns.types timestamp
+                      escape.delim \
+                      hive.serialization.extend.additional.nesting.levels true
+                      serialization.escape.crlf true
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
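Reviewer note: Calcite's Rel2SQL renders Hive's STRING as VARCHAR(2147483647) and Hive's nanosecond-precision TIMESTAMP as TIMESTAMP(9), neither of which Hive's own parser accepts, so getOptimizedSql() patches the generated text back to Hive type names before it is surfaced (e.g. in the OPTIMIZED SQL: block that explain extended prints above). A minimal standalone sketch of that post-processing step; the class name and sample input are hypothetical, not part of the patch:

```java
// Hypothetical demo class; only the two replaceAll calls mirror the patch.
public class Rel2SqlTypeRewrite {

  // Map Calcite's rendering of Hive types back to names Hive can parse:
  // VARCHAR(2147483647), i.e. VARCHAR(Integer.MAX_VALUE), becomes STRING;
  // TIMESTAMP(9), nanosecond precision, becomes plain TIMESTAMP.
  static String toHiveTypes(String sql) {
    return sql
        .replaceAll("VARCHAR\\(2147483647\\)", "STRING")
        .replaceAll("TIMESTAMP\\(9\\)", "TIMESTAMP");
  }

  public static void main(String[] args) {
    String generated =
        "SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP(9)) AS `c`";
    // Prints: SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP) AS `c`
    System.out.println(toHiveTypes(generated));
  }
}
```

One caveat worth flagging: replaceAll operates on the whole generated string, so the same text occurring inside a quoted literal would also be rewritten; that trade-off seems acceptable here since the OPTIMIZED SQL output is informational rather than re-executed.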