diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties
index 47a1107..13ebfb2 100644
--- itests/src/test/resources/testconfiguration.properties
+++ itests/src/test/resources/testconfiguration.properties
@@ -344,7 +344,8 @@ minitez.query.files=bucket_map_join_tez1.q,\
   vectorized_dynamic_partition_pruning.q,\
   tez_multi_union.q,\
   tez_join.q,\
-  tez_union_multiinsert.q
+  tez_union_multiinsert.q,\
+  tez_smb_test.q
 
 encrypted.query.files=encryption_join_unencrypted_tbl.q,\
   encryption_insert_partition_static.q,\
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index bcffdbc..f9ac10d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.ql.exec.MuxOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator;
 import org.apache.hadoop.hive.ql.lib.Node;
@@ -369,6 +370,28 @@ private boolean checkConvertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcCont
         (ReduceSinkOperator) joinOp.getParentOperators().get(bigTablePosition);
     int numBuckets = bigTableRS.getParentOperators().get(0).getOpTraits().getNumBuckets();
 
+    int size = -1;
+    for (Operator parentOp : joinOp.getParentOperators()) {
+      // each side better have 0 or more RS. if either side is unbalanced, cannot convert.
+      // This is a workaround for now. Right fix would be to refactor code in the
+      // MapRecordProcessor and ReduceRecordProcessor with respect to the sources.
+      Set<ReduceSinkOperator> set =
+          OperatorUtils.findOperatorsUpstream(parentOp.getParentOperators(),
+              ReduceSinkOperator.class);
+      if (size < 0) {
+        size = set.size();
+        continue;
+      }
+
+      if (size != set.size()) {
+        return false;
+      }
+      /*
+       * if (size < 0) { size = set.size(); continue; } if (((size > 0) && (set.size() > 0)) ||
+       * ((size == 0) && (set.size() == 0))) { continue; } else { return false; }
+       */
+    }
+
     // the sort and bucket cols have to match on both sides for this
     // transformation of the join operation
     for (Operator parentOp : joinOp.getParentOperators()) {
diff --git ql/src/test/queries/clientpositive/tez_smb_1.q ql/src/test/queries/clientpositive/tez_smb_1.q
index 580672f..b3b72e1 100644
--- ql/src/test/queries/clientpositive/tez_smb_1.q
+++ ql/src/test/queries/clientpositive/tez_smb_1.q
@@ -34,6 +34,22 @@ set hive.auto.convert.join.noconditionaltask.size=500;
 explain
 select count(*) from tab s1 join tab s3 on s1.key=s3.key;
 
+set hive.convert.join.bucket.mapjoin.tez = false;
+explain
+select count(*) from
+tab vt1
+join
+(select rt2.id from
+(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+where vt1.key=vt2.id;
+
+select count(*) from
+tab vt1
+join
+(select rt2.id from
+(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+where vt1.key=vt2.id;
+
 set hive.auto.convert.join=false;
 
 explain
diff --git ql/src/test/results/clientpositive/tez/tez_smb_1.q.out ql/src/test/results/clientpositive/tez/tez_smb_1.q.out
index d970bd9..3054e5f 100644
--- ql/src/test/results/clientpositive/tez/tez_smb_1.q.out
+++ ql/src/test/results/clientpositive/tez/tez_smb_1.q.out
@@ -320,3 +320,146 @@ POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 480
+PREHOOK: query: explain
+select count(*) from
+(select rt1.id from
+(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+join
+(select rt2.id from
+(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+where vt1.id=vt2.id
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from
+(select rt1.id from
+(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+join
+(select rt2.id from
+(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+where vt1.id=vt2.id
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 4 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+        Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Reducer 4 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Merge Join Operator
+                  condition map:
+                       Inner Join 0 to 1
+                  keys:
+                    0 _col0 (type: int)
+                    1 _col0 (type: int)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (_col0 = _col1) (type: boolean)
+                    Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+        Reducer 5 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from
+(select rt1.id from
+(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+join
+(select rt2.id from
+(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+where vt1.id=vt2.id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select rt1.id from
+(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+join
+(select rt2.id from
+(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+where vt1.id=vt2.id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+480
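
Note (illustration, not part of the patch): the ConvertJoinMapJoin.java hunk above refuses the SMB conversion whenever the join's parents do not see the same number of ReduceSinkOperators upstream. Below is a minimal, self-contained Java sketch of that balance check; the Op class, helper methods, and class name SmbBalanceCheckSketch are made up for this illustration and are not Hive code.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Toy stand-in for an operator-tree node; "isReduceSink" marks RS operators.
final class Op {
    final String name;
    final boolean isReduceSink;
    final List<Op> parents = new ArrayList<>();

    Op(String name, boolean isReduceSink) {
        this.name = name;
        this.isReduceSink = isReduceSink;
    }
}

public class SmbBalanceCheckSketch {

    // Collect every RS node reachable upstream from (and including) the given start nodes.
    static Set<Op> findReduceSinksUpstream(List<Op> starts) {
        Set<Op> found = new HashSet<>();
        for (Op start : starts) {
            collect(start, found);
        }
        return found;
    }

    private static void collect(Op op, Set<Op> found) {
        if (op.isReduceSink) {
            found.add(op);
        }
        for (Op parent : op.parents) {
            collect(parent, found);
        }
    }

    // Mirrors the idea of the workaround in checkConvertJoinSMBJoin: every parent of the
    // join must see the same number of upstream RS operators, otherwise conversion is refused.
    static boolean sidesAreBalanced(List<Op> joinParents) {
        int size = -1;
        for (Op parentOp : joinParents) {
            Set<Op> set = findReduceSinksUpstream(parentOp.parents);
            if (size < 0) {
                size = set.size();   // remember the first side's count
                continue;
            }
            if (size != set.size()) {
                return false;        // unbalanced sides: do not convert
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Side A: scan -> RS -> join parent (one upstream RS).
        Op scanA = new Op("scanA", false);
        Op rsA = new Op("rsA", true);
        rsA.parents.add(scanA);
        Op parentA = new Op("parentA", false);
        parentA.parents.add(rsA);

        // Side B: scan -> join parent (no upstream RS), so the sides are unbalanced.
        Op scanB = new Op("scanB", false);
        Op parentB = new Op("parentB", false);
        parentB.parents.add(scanB);

        System.out.println(sidesAreBalanced(List.of(parentA, parentB))); // prints false
    }
}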