diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java
index ffa2a1f262..db7794c33b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableFunctionScan.java
@@ -25,12 +25,13 @@
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.TableFunctionScan;
+import org.apache.calcite.rel.logical.LogicalTableFunctionScan;
 import org.apache.calcite.rel.metadata.RelColumnMapping;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 
-public class HiveTableFunctionScan extends TableFunctionScan implements HiveRelNode {
+public class HiveTableFunctionScan extends LogicalTableFunctionScan implements HiveRelNode {
 
   /**
    * @param cluster
@@ -61,7 +62,7 @@ public static HiveTableFunctionScan create(RelOptCluster cluster, RelTraitSet tr
   }
 
   @Override
-  public TableFunctionScan copy(RelTraitSet traitSet, List<RelNode> inputs, RexNode rexCall,
+  public LogicalTableFunctionScan copy(RelTraitSet traitSet, List<RelNode> inputs, RexNode rexCall,
       Type elementType, RelDataType rowType, Set<RelColumnMapping> columnMappings) {
     return new HiveTableFunctionScan(getCluster(), traitSet, inputs, rexCall,
         elementType, rowType, columnMappings);
diff --git ql/src/test/results/clientpositive/except_all.q.out ql/src/test/results/clientpositive/except_all.q.out
index 020cba4287..5d1dc2211a 100644
--- ql/src/test/results/clientpositive/except_all.q.out
+++ ql/src/test/results/clientpositive/except_all.q.out
@@ -276,10 +276,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint)
-              outputColumnNames: _col0, _col1, _col3, _col4
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                aggregations: sum(_col4), sum(_col3)
+                aggregations: sum(_col3), sum(_col2)
                 keys: _col0 (type: string), _col1 (type: string)
                 minReductionHashAggr: 0.99
                 mode: hash
@@ -297,10 +297,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint)
-              outputColumnNames: _col0, _col1, _col3, _col4
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                aggregations: sum(_col4), sum(_col3)
+                aggregations: sum(_col3), sum(_col2)
                 keys: _col0 (type: string), _col1 (type: string)
                 minReductionHashAggr: 0.99
                 mode: hash
@@ -467,10 +467,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint)
-              outputColumnNames: _col0, _col1, _col3, _col4
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                aggregations: sum(_col4), sum(_col3)
+                aggregations: sum(_col3), sum(_col2)
                 keys: _col0 (type: string), _col1 (type: string)
                 minReductionHashAggr: 0.99
                 mode: hash
@@ -488,10 +488,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint)
-              outputColumnNames: _col0, _col1, _col3, _col4
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                aggregations: sum(_col4), sum(_col3)
+                aggregations: sum(_col3), sum(_col2)
                 keys: _col0 (type: string), _col1 (type: string)
                 minReductionHashAggr: 0.99
                 mode: hash
diff --git ql/src/test/results/clientpositive/intersect_all_rj.q.out ql/src/test/results/clientpositive/intersect_all_rj.q.out
index b8ff98ae79..427b841a1b 100644
--- ql/src/test/results/clientpositive/intersect_all_rj.q.out
+++ ql/src/test/results/clientpositive/intersect_all_rj.q.out
@@ -180,12 +180,12 @@ HiveProject($f0=[$1])
   HiveAggregate(group=[{0}], agg#0=[count()])
     HiveProject($f0=[$0])
       HiveAggregate(group=[{0}])
-        HiveProject($f0=[CASE(IS NOT NULL($7), $7, if($5, $8, $6))])
-          HiveJoin(condition=[>=($1, $13)], joinType=[inner], algorithm=[none], cost=[not available])
-            HiveProject(int_col_10=[$0], bigint_col_3=[$1], BLOCK__OFFSET__INSIDE__FILE=[$2], INPUT__FILE__NAME=[$3], CAST=[CAST($4):RecordType(BIGINT writeid, INTEGER bucketid, BIGINT rowid)])
+        HiveProject($f0=[CASE(IS NOT NULL($3), $3, if($1, $4, $2))])
+          HiveJoin(condition=[>=($0, $5)], joinType=[inner], algorithm=[none], cost=[not available])
+            HiveProject(bigint_col_3=[$1])
               HiveFilter(condition=[IS NOT NULL($1)])
                 HiveTableScan(table=[[default, table_7]], table:alias=[a3])
-            HiveProject(boolean_col_16=[$0], timestamp_col_5=[$1], timestamp_col_15=[$2], timestamp_col_30=[$3], int_col_18=[$4], BLOCK__OFFSET__INSIDE__FILE=[$5], INPUT__FILE__NAME=[$6], ROW__ID=[$7], CAST=[CAST($4):BIGINT])
+            HiveProject(boolean_col_16=[$0], timestamp_col_5=[$1], timestamp_col_15=[$2], timestamp_col_30=[$3], CAST=[CAST($4):BIGINT])
               HiveFilter(condition=[IS NOT NULL(CAST($4):BIGINT)])
                 HiveTableScan(table=[[default, table_10]], table:alias=[a4])
   HiveProject($f0=[$0], $f1=[$1])
diff --git ql/src/test/results/clientpositive/llap/intersect_all_rj.q.out ql/src/test/results/clientpositive/llap/intersect_all_rj.q.out
index cdfbc2239e..c47452fabd 100644
--- ql/src/test/results/clientpositive/llap/intersect_all_rj.q.out
+++ ql/src/test/results/clientpositive/llap/intersect_all_rj.q.out
@@ -180,12 +180,12 @@ HiveProject($f0=[$1])
   HiveAggregate(group=[{0}], agg#0=[count()])
     HiveProject($f0=[$0])
       HiveAggregate(group=[{0}])
-        HiveProject($f0=[CASE(IS NOT NULL($7), $7, if($5, $8, $6))])
-          HiveJoin(condition=[>=($1, $13)], joinType=[inner], algorithm=[none], cost=[not available])
-            HiveProject(int_col_10=[$0], bigint_col_3=[$1], BLOCK__OFFSET__INSIDE__FILE=[$2], INPUT__FILE__NAME=[$3], CAST=[CAST($4):RecordType(BIGINT writeid, INTEGER bucketid, BIGINT rowid)])
+        HiveProject($f0=[CASE(IS NOT NULL($3), $3, if($1, $4, $2))])
+          HiveJoin(condition=[>=($0, $5)], joinType=[inner], algorithm=[none], cost=[not available])
+            HiveProject(bigint_col_3=[$1])
               HiveFilter(condition=[IS NOT NULL($1)])
                 HiveTableScan(table=[[default, table_7]], table:alias=[a3])
-            HiveProject(boolean_col_16=[$0], timestamp_col_5=[$1], timestamp_col_15=[$2], timestamp_col_30=[$3], int_col_18=[$4], BLOCK__OFFSET__INSIDE__FILE=[$5], INPUT__FILE__NAME=[$6], ROW__ID=[$7], CAST=[CAST($4):BIGINT])
+            HiveProject(boolean_col_16=[$0], timestamp_col_5=[$1], timestamp_col_15=[$2], timestamp_col_30=[$3], CAST=[CAST($4):BIGINT])
               HiveFilter(condition=[IS NOT NULL(CAST($4):BIGINT)])
                 HiveTableScan(table=[[default, table_10]], table:alias=[a4])
   HiveProject($f0=[$0], $f1=[$1])