diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index a622095..05b7f48 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -815,9 +815,11 @@ boolean validateReduceWorkOperator(Operator<? extends OperatorDesc> op) {
         ret = validateSelectOperator((SelectOperator) op);
         break;
       case REDUCESINK:
-          ret = validateReduceSinkOperator((ReduceSinkOperator) op);
-          break;
+        ret = validateReduceSinkOperator((ReduceSinkOperator) op);
+        break;
       case FILESINK:
+        ret = validateFileSinkOperator((FileSinkOperator) op);
+        break;
       case LIMIT:
         ret = true;
         break;
@@ -899,6 +901,15 @@ private boolean validateExtractOperator(ExtractOperator op) {
     return true;
   }
 
+  private boolean validateFileSinkOperator(FileSinkOperator op) {
+    // HIVE-7557: For now, turn off dynamic partitioning to give more time to
+    // figure out how to make VectorFileSink work correctly with it...
+    if (op.getConf().getDynPartCtx() != null) {
+      return false;
+    }
+    return true;
+  }
+
   private boolean validateExprNodeDesc(List<ExprNodeDesc> descs) {
     return validateExprNodeDesc(descs, VectorExpressionDescriptor.Mode.PROJECTION);
   }
diff --git ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index 5083ae2..2897c41 100644
--- ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@ -14,6 +14,7 @@ PREHOOK: query: create table over1k(
        fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k
 POSTHOOK: query: create table over1k(
            t tinyint,
            si smallint,
@@ -42,6 +43,7 @@ POSTHOOK: Output: default@over1k
 PREHOOK: query: create table over1k_orc like over1k
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_orc
 POSTHOOK: query: create table over1k_orc like over1k
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
@@ -81,6 +83,7 @@ PREHOOK: query: create table over1k_part_orc(
        partitioned by (ds string, t tinyint) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_orc
 POSTHOOK: query: create table over1k_part_orc(
            si smallint,
            i int,
@@ -93,6 +96,7 @@ POSTHOOK: Output: default@over1k_part_orc
 PREHOOK: query: create table over1k_part_limit_orc like over1k_part_orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_limit_orc
 POSTHOOK: query: create table over1k_part_limit_orc like over1k_part_orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
@@ -114,6 +118,7 @@ PREHOOK: query: create table over1k_part_buck_orc(
        clustered by (si) into 4 buckets stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_buck_orc
 POSTHOOK: query: create table over1k_part_buck_orc(
            si smallint,
            i int,
@@ -134,6 +139,7 @@ PREHOOK: query: create table over1k_part_buck_sort_orc(
        sorted by (f) into 4 buckets stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_buck_sort_orc
 POSTHOOK: query: create table over1k_part_buck_sort_orc(
            si smallint,
            i int,
@@ -195,6 +201,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col4 (type: tinyint)
                   Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+            Execution mode: vectorized
         Reducer 3
             Reduce Operator Tree:
               Extract
@@ -280,6 +287,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col4 (type: tinyint)
                   Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+            Execution mode: vectorized
         Reducer 3
             Reduce Operator Tree:
               Extract
@@ -564,6 +572,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col4 (type: tinyint)
                   Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+            Execution mode: vectorized
         Reducer 3
             Reduce Operator Tree:
               Extract
@@ -649,6 +658,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col4 (type: tinyint)
                   Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+            Execution mode: vectorized
         Reducer 3
             Reduce Operator Tree:
               Extract
@@ -1292,6 +1302,7 @@ create table over1k_part2_orc(
        partitioned by (ds string, t tinyint)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part2_orc
 POSTHOOK: query: -- tests for HIVE-6883
 create table over1k_part2_orc(
            si smallint,
@@ -1419,6 +1430,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col4 (type: tinyint)
                   Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+            Execution mode: vectorized
         Reducer 3
             Reduce Operator Tree:
               Extract
@@ -1762,6 +1774,7 @@ create table over1k_part_buck_sort2_orc(
        sorted by (f) into 1 buckets
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_buck_sort2_orc
 POSTHOOK: query: -- hadoop-1 does not honor number of reducers in local mode. There is always only 1 reducer irrespective of the number of buckets.
 -- Hence all records go to one bucket and all other buckets will be empty. Similar to HIVE-6867. However, hadoop-2 honors number
 -- of reducers and records are spread across all reducers. To avoid this inconsistency we will make number of buckets to 1 for this test.
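
Reviewer note (illustrative, not part of the patch): the sketch below mirrors the guard that the new validateFileSinkOperator adds, using hypothetical stand-in classes for FileSinkDesc and DynamicPartitionCtx so it compiles without the Hive ql module on the classpath; the real method reaches the descriptor through op.getConf() on a FileSinkOperator.

// FileSinkValidationSketch.java -- stand-alone sketch, NOT Hive source.
public class FileSinkValidationSketch {

  // Hypothetical stand-ins for org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx
  // and FileSinkDesc; only the single accessor the guard needs is modeled here.
  static final class DynamicPartitionCtx { }

  static final class FileSinkDesc {
    private final DynamicPartitionCtx dynPartCtx;
    FileSinkDesc(DynamicPartitionCtx dynPartCtx) { this.dynPartCtx = dynPartCtx; }
    DynamicPartitionCtx getDynPartCtx() { return dynPartCtx; }
  }

  // Same shape as the patched Vectorizer.validateFileSinkOperator: a FILESINK
  // is considered vectorizable only when no dynamic-partition context is set.
  static boolean validateFileSink(FileSinkDesc conf) {
    return conf.getDynPartCtx() == null;
  }

  public static void main(String[] args) {
    // Static partitions: the FileSink may run vectorized.
    System.out.println(validateFileSink(new FileSinkDesc(null)));                         // true
    // Dynamic partitioning: vectorization is rejected (the HIVE-7557 fallback).
    System.out.println(validateFileSink(new FileSinkDesc(new DynamicPartitionCtx())));    // false
  }
}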