diff --git a/llap-server/src/main/resources/package.py b/llap-server/src/main/resources/package.py
index c48ff79691..9eb3fd734b 100644
--- a/llap-server/src/main/resources/package.py
+++ b/llap-server/src/main/resources/package.py
@@ -130,6 +130,8 @@ def main(args):
       service_keytab_path += "/" + service_keytab
     else:
       service_keytab_path = service_keytab
+  if service_keytab_path:
+    service_keytab_path = "hdfs:///user/hive/" + service_keytab_path
 
   if not input:
     print "Cannot find input files"
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
index f0b41f36f3..f451cc9cb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
+import java.security.acl.Group;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -592,4 +593,23 @@ private static boolean areBacktrackedExprsCompatible(final List or
     }
     return currRS;
   }
+
+  /**
+   * Given a reduce-side group by operator, tries to find the corresponding map-side group by (partial/merge).
+   * @param reduceSideGbOp the reduce-side group by operator
+   * @return the map-side group by if present, otherwise null
+   */
+  public static GroupByOperator findMapSideGb(final GroupByOperator reduceSideGbOp) {
+    Operator<?> parentOp = reduceSideGbOp;
+    while (parentOp.getParentOperators() != null && parentOp.getParentOperators().size() > 0) {
+      if (parentOp.getParentOperators().size() > 1) {
+        return null;
+      }
+      parentOp = parentOp.getParentOperators().get(0);
+      if (parentOp instanceof GroupByOperator) {
+        return (GroupByOperator) parentOp;
+      }
+    }
+    return null;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 32fba6c8ff..5fbad85397 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -1382,7 +1382,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
         } else {
           // Case 3: column stats, hash aggregation, NO grouping sets
           cardinality = Math.min(parentNumRows/2, StatsUtils.safeMult(ndvProduct, parallelism));
-          long orgParentNumRows = getParentNumRows(gop, gop.getConf().getKeys(), conf);
+          long orgParentNumRows = StatsUtils.safeMult(getParentNumRows(gop, gop.getConf().getKeys(), conf),
+              parallelism);
           cardinality = Math.min(cardinality, orgParentNumRows);
 
           if (LOG.isDebugEnabled()) {
@@ -1410,8 +1411,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
 
           // in reduce side GBY, we don't know if the grouping set was present or not. so get it
           // from map side GBY
-          GroupByOperator mGop = OperatorUtils.findSingleOperatorUpstream(parent, GroupByOperator.class);
-          if (mGop != null) {
+          //GroupByOperator mGop = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(parent, GroupByOperator.class);
+          GroupByOperator mGop = OperatorUtils.findMapSideGb(gop);
+          if (mGop != null) {
             containsGroupingSet = mGop.getConf().isGroupingSetsPresent();
           }
 
@@ -1425,7 +1427,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
             }
           } else {
             // Case 9: column stats, NO grouping sets
-            cardinality = Math.min(parentNumRows, ndvProduct);
+            // to get to the source number of rows we should use the original (map-side) group by
+            GroupByOperator gOpStats = mGop;
+            if (gOpStats == null) {
+              // it can be null when the plan has a single group by (instead of merge and final), e.g. autogathered stats
+              gOpStats = gop;
+            }
+            long orgParentNumRows = getParentNumRows(gOpStats, gOpStats.getConf().getKeys(), conf);
+            cardinality = Math.min(orgParentNumRows, ndvProduct);
 
             if (LOG.isDebugEnabled()) {
               LOG.debug("[Case 9] STATS-" + gop.toString() + ": cardinality: " + cardinality);
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
index fe30d3197c..5ebc0706e0 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
@@ -837,10 +837,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 4 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 4 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
index 91369679aa..b35241e383 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
@@ -829,10 +829,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
           mode: mergepartial
          outputColumnNames: state, zip
-          Statistics: Num rows: 4 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 8 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 4 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 8 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out
index 4e6885d9a2..8b607428c7 100644
--- a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out
@@ -1734,15 +1734,15 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 value expressions: _col1 (type: bigint) Reducer 3 @@ -1751,7 +1751,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 10 Statistics: Num rows: 10 Data size: 950 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out index 5de5a69f87..ff3b5efe84 100644 --- a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out +++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out @@ -1682,7 +1682,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Map Join Operator condition map: Inner Join 0 to 1 @@ -1691,7 +1691,7 @@ STAGE PLANS: 1 _col0 (type: int), _col1 (type: string) input vertices: 1 Map 4 - Statistics: Num rows: 242 Data size: 1936 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 484 Data size: 3872 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -1804,7 +1804,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Map Join Operator condition map: Inner Join 0 to 1 @@ -1813,7 +1813,7 @@ STAGE PLANS: 1 _col0 (type: int), _col1 (type: string) input vertices: 1 Map 4 - Statistics: Num rows: 242 Data size: 1936 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 484 Data size: 3872 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash diff --git a/ql/src/test/results/clientpositive/llap/check_constraint.q.out b/ql/src/test/results/clientpositive/llap/check_constraint.q.out index 2b1564879c..fc05e6ee10 100644 --- a/ql/src/test/results/clientpositive/llap/check_constraint.q.out +++ b/ql/src/test/results/clientpositive/llap/check_constraint.q.out @@ -1756,11 +1756,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 73500 Basic stats: 
COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 147000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: int), _col3 (type: decimal(5,2)), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 51750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 103500 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 10 Statistics: Num rows: 10 Data size: 2070 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/constraints_optimization.q.out b/ql/src/test/results/clientpositive/llap/constraints_optimization.q.out index bf60646636..fe8397112f 100644 --- a/ql/src/test/results/clientpositive/llap/constraints_optimization.q.out +++ b/ql/src/test/results/clientpositive/llap/constraints_optimization.q.out @@ -1538,14 +1538,14 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out index efa2dd818a..c4e9d6fc1c 100644 --- a/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out +++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out @@ -1466,10 +1466,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 32 Data size: 4828 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 64 Data size: 9565 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 32 Data size: 4828 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 64 Data size: 9565 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1603,10 +1603,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 32 Data size: 4828 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 64 Data size: 9565 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 32 Data size: 4828 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 64 Data size: 9565 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2936,11 +2936,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 19 Data size: 3477 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 4575 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: hash(_col0) (type: int), hash(_col1) (type: int), hash(_col2) (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 19 Data size: 3477 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 4575 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col0), sum(_col1), sum(_col2) mode: hash @@ -3093,11 +3093,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 19 Data size: 3477 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 4575 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: hash(_col0) (type: int), hash(_col1) (type: int), hash(_col2) (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 19 Data size: 3477 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 4575 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col0), sum(_col1), sum(_col2) mode: hash diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out index 93a3017696..857020e107 100644 --- a/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out +++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out @@ -3406,21 +3406,21 @@ STAGE PLANS: 0 _col0 (type: string) 1 _col0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string), _col4 (type: bigint) sort order: +++++ - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Execution mode: vectorized, llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3456,12 +3456,12 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 395 Data size: 73470 Basic 
stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 395 Data size: 73470 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string), _col2 (type: bigint) Stage: Stage-0 @@ -3672,10 +3672,10 @@ STAGE PLANS: 0 _col0 (type: string) 1 _col0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3711,12 +3711,12 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 395 Data size: 73470 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 395 Data size: 73470 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string), _col2 (type: bigint) Stage: Stage-0 @@ -3934,7 +3934,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 395 Data size: 73470 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Map Join Operator condition map: Inner Join 0 to 1 @@ -3944,10 +3944,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 input vertices: 0 Reducer 2 - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 7000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out index bd3c7769a4..98e0e82077 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out @@ -1702,14 +1702,14 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4 - 
Statistics: Num rows: 5 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1718,18 +1718,18 @@ STAGE PLANS: Select Operator expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), 'foo' (type: string), _col4 (type: tinyint) outputColumnNames: si, i, b, f, ds, t - Statistics: Num rows: 5 Data size: 555 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 11 Data size: 1221 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(si, 'hll'), compute_stats(i, 'hll'), compute_stats(b, 'hll'), compute_stats(f, 'hll') keys: ds (type: string), t (type: tinyint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 2 Data size: 3574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5 Data size: 8935 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: tinyint) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint) - Statistics: Num rows: 2 Data size: 3574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5 Data size: 8935 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct) Reducer 3 Execution mode: llap @@ -1739,14 +1739,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: tinyint) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 2 Data size: 3702 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5 Data size: 9255 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col0 (type: string), _col1 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 2 Data size: 3702 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5 Data size: 9255 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 3702 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5 Data size: 9255 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1825,15 +1825,15 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num 
rows: 5 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false Dp Sort State: PARTITION_SORTED - Statistics: Num rows: 5 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out index 30074abaf2..cb8dc83847 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out @@ -1192,14 +1192,14 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1208,7 +1208,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int) outputColumnNames: ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(ss_net_paid_inc_tax, 'hll'), compute_stats(ss_net_profit, 'hll') keys: ss_sold_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out index dca0ebdab8..07ab2202d1 100644 --- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out +++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out @@ -3062,11 +3062,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: UDFToInteger(_col0) (type: int), CAST( _col0 AS decimal(5,2)) (type: decimal(5,2)), _col1 (type: 
string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 51750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 103500 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 2 Statistics: Num rows: 2 Data size: 414 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/except_distinct.q.out b/ql/src/test/results/clientpositive/llap/except_distinct.q.out index c155a73c96..46b29c38f0 100644 --- a/ql/src/test/results/clientpositive/llap/except_distinct.q.out +++ b/ql/src/test/results/clientpositive/llap/except_distinct.q.out @@ -277,26 +277,26 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -306,17 +306,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean) - Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 32204 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 41 Data size: 7298 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 41 Data size: 7298 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column 
stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -329,26 +329,26 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Union 3 Vertex: Union 3 @@ -459,26 +459,26 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 274 Data size: 53156 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 137 Data size: 26578 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 137 Data size: 26578 Basic stats: 
COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 12 Execution mode: vectorized, llap @@ -488,26 +488,26 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -517,26 +517,26 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: 
string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -546,39 +546,39 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean) - Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 32204 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 32204 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: _col0 (type: string), _col1 (type: string) mode: complete outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 291 Data size: 56454 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 145 Data size: 28130 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 145 Data size: 28130 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 6 Execution mode: vectorized, llap @@ -588,39 +588,39 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 145 Data size: 28130 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean) - Statistics: Num rows: 24 Data size: 4656 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data 
size: 32204 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 24 Data size: 4656 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 32204 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: _col0 (type: string), _col1 (type: string) mode: complete outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 24 Data size: 4464 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 24 Data size: 4656 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 274 Data size: 53156 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 137 Data size: 26578 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 137 Data size: 26578 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -630,17 +630,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 137 Data size: 26578 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean) - Statistics: Num rows: 22 Data size: 4268 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 32204 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 22 Data size: 3916 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 22 Data size: 3916 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -653,26 +653,26 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: 
Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 291 Data size: 56454 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), sum(_col3) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 145 Data size: 28130 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 145 Data size: 28130 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out index c86450aae2..4a0e412b1a 100644 --- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out +++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out @@ -355,9 +355,9 @@ Stage-0 Stage-1 Reducer 2 llap File Output Operator [FS_6] - Select Operator [SEL_5] (rows=10 width=97) + Select Operator [SEL_5] (rows=20 width=92) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_4] (rows=10 width=101) + Group By Operator [GBY_4] (rows=20 width=96) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_3] @@ -391,18 +391,18 @@ Stage-0 Stage-1 Reducer 3 llap File Output Operator [FS_11] - Select Operator [SEL_10] (rows=5 width=20) + Select Operator [SEL_10] (rows=10 width=20) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_9] (rows=5 width=20) + Group By Operator [GBY_9] (rows=10 width=20) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_8] PartitionCols:_col0, _col1 - Group By Operator [GBY_7] (rows=5 width=20) + Group By Operator [GBY_7] (rows=10 width=20) Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col0, _col1 - Select Operator [SEL_5] (rows=10 width=101) + Select Operator [SEL_5] (rows=20 width=96) Output:["_col0","_col1"] - Group By Operator [GBY_4] (rows=10 width=101) + Group By Operator [GBY_4] (rows=20 width=96) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_3] @@ -445,22 +445,22 @@ Stage-0 Stage-1 Reducer 4 llap File Output Operator [FS_29] - Select Operator [SEL_27] (rows=1 width=20) + Select Operator [SEL_27] (rows=4 
width=19) Output:["_col0","_col1","_col2"] <-Reducer 3 [SIMPLE_EDGE] llap SHUFFLE [RS_26] - Select Operator [SEL_25] (rows=1 width=28) + Select Operator [SEL_25] (rows=4 width=27) Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_24] (rows=1 width=20) + Group By Operator [GBY_24] (rows=4 width=19) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_23] PartitionCols:_col0, _col1 - Group By Operator [GBY_22] (rows=1 width=20) + Group By Operator [GBY_22] (rows=2 width=20) Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col5, _col1 - Select Operator [SEL_21] (rows=1 width=24) + Select Operator [SEL_21] (rows=4 width=24) Output:["_col1","_col5"] - Merge Join Operator [MERGEJOIN_57] (rows=1 width=24) + Merge Join Operator [MERGEJOIN_57] (rows=4 width=24) Conds:RS_17._col0=RS_18._col0(Inner),RS_18._col0=RS_19._col0(Inner),Output:["_col1","_col2","_col4","_col5","_col7"],residual filter predicates:{((_col4 > 0) or _col2)} {((_col4 + _col7) >= 0)} <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_17] @@ -490,9 +490,9 @@ Stage-0 <-Reducer 8 [SIMPLE_EDGE] llap SHUFFLE [RS_19] PartitionCols:_col0 - Select Operator [SEL_16] (rows=2 width=89) + Select Operator [SEL_16] (rows=5 width=89) Output:["_col0","_col1"] - Group By Operator [GBY_15] (rows=2 width=93) + Group By Operator [GBY_15] (rows=5 width=93) Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2 <-Map 7 [SIMPLE_EDGE] llap SHUFFLE [RS_14] @@ -1420,13 +1420,13 @@ Stage-0 File Output Operator [FS_9] Limit [LIM_8] (rows=1 width=97) Number of rows:1 - Select Operator [SEL_7] (rows=10 width=97) + Select Operator [SEL_7] (rows=20 width=92) Output:["_col0","_col1","_col2"] <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_6] - Select Operator [SEL_5] (rows=10 width=97) + Select Operator [SEL_5] (rows=20 width=92) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_4] (rows=10 width=101) + Group By Operator [GBY_4] (rows=20 width=96) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_3] @@ -1463,22 +1463,22 @@ Stage-0 File Output Operator [FS_14] Limit [LIM_13] (rows=1 width=20) Number of rows:1 - Select Operator [SEL_12] (rows=5 width=20) + Select Operator [SEL_12] (rows=10 width=20) Output:["_col0","_col1","_col2"] <-Reducer 3 [SIMPLE_EDGE] llap SHUFFLE [RS_11] - Group By Operator [GBY_9] (rows=5 width=20) + Group By Operator [GBY_9] (rows=10 width=20) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_8] PartitionCols:_col0, _col1 - Group By Operator [GBY_7] (rows=5 width=20) + Group By Operator [GBY_7] (rows=10 width=20) Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col0 - Top N Key Operator [TNK_15] (rows=10 width=101) + Top N Key Operator [TNK_15] (rows=20 width=96) keys:_col1, _col0,sort order:++,top n:1 - Select Operator [SEL_5] (rows=10 width=101) + Select Operator [SEL_5] (rows=20 width=96) Output:["_col0","_col1"] - Group By Operator [GBY_4] (rows=10 width=101) + Group By Operator [GBY_4] (rows=20 width=96) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_3] @@ -1591,24 +1591,24 @@ Stage-0 Stage-1 Reducer 6 llap File Output Operator [FS_42] - Limit [LIM_40] (rows=1 width=28) + Limit [LIM_40] (rows=4 width=27) 
Number of rows:5 - Select Operator [SEL_39] (rows=1 width=28) + Select Operator [SEL_39] (rows=4 width=27) Output:["_col0","_col1","_col2"] <-Reducer 5 [SIMPLE_EDGE] llap SHUFFLE [RS_38] - Select Operator [SEL_37] (rows=1 width=28) + Select Operator [SEL_37] (rows=4 width=27) Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_36] (rows=1 width=20) + Group By Operator [GBY_36] (rows=4 width=19) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 4 [SIMPLE_EDGE] llap SHUFFLE [RS_35] PartitionCols:_col0, _col1 - Group By Operator [GBY_34] (rows=1 width=20) + Group By Operator [GBY_34] (rows=2 width=20) Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col4, _col7 - Select Operator [SEL_33] (rows=2 width=28) + Select Operator [SEL_33] (rows=4 width=28) Output:["_col4","_col7"] - Merge Join Operator [MERGEJOIN_64] (rows=2 width=28) + Merge Join Operator [MERGEJOIN_64] (rows=4 width=28) Conds:RS_29._col0=RS_30._col0(Inner),RS_30._col0=RS_31._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col7","_col8"],residual filter predicates:{((_col3 + _col1) >= 0)} {(_col5 or _col8)} <-Map 10 [SIMPLE_EDGE] llap SHUFFLE [RS_31] @@ -1622,17 +1622,17 @@ Stage-0 <-Reducer 3 [SIMPLE_EDGE] llap SHUFFLE [RS_29] PartitionCols:_col0 - Filter Operator [FIL_11] (rows=2 width=105) + Filter Operator [FIL_11] (rows=3 width=105) predicate:_col0 is not null - Limit [LIM_9] (rows=3 width=105) + Limit [LIM_9] (rows=5 width=88) Number of rows:5 - Select Operator [SEL_8] (rows=3 width=105) + Select Operator [SEL_8] (rows=6 width=90) Output:["_col0","_col1"] <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_7] - Select Operator [SEL_6] (rows=3 width=105) + Select Operator [SEL_6] (rows=6 width=90) Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_5] (rows=3 width=101) + Group By Operator [GBY_5] (rows=6 width=85) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_4] @@ -1646,19 +1646,19 @@ Stage-0 <-Reducer 9 [SIMPLE_EDGE] llap SHUFFLE [RS_30] PartitionCols:_col0 - Select Operator [SEL_25] (rows=2 width=101) + Select Operator [SEL_25] (rows=3 width=101) Output:["_col0","_col1","_col2","_col3"] - Filter Operator [FIL_24] (rows=2 width=97) + Filter Operator [FIL_24] (rows=3 width=97) predicate:_col0 is not null - Limit [LIM_22] (rows=3 width=97) + Limit [LIM_22] (rows=5 width=80) Number of rows:5 - Select Operator [SEL_21] (rows=3 width=97) + Select Operator [SEL_21] (rows=6 width=82) Output:["_col0","_col1","_col2"] <-Reducer 8 [SIMPLE_EDGE] llap SHUFFLE [RS_20] - Select Operator [SEL_19] (rows=3 width=97) + Select Operator [SEL_19] (rows=6 width=82) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_18] (rows=3 width=101) + Group By Operator [GBY_18] (rows=6 width=85) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Map 7 [SIMPLE_EDGE] llap SHUFFLE [RS_17] @@ -2037,9 +2037,9 @@ Stage-0 Output:["_col0","_col1"] Group By Operator [GBY_12] (rows=83 width=91) Output:["_col0"],keys:_col1 - Select Operator [SEL_8] (rows=83 width=178) + Select Operator [SEL_8] (rows=166 width=178) Output:["_col1"] - Group By Operator [GBY_7] (rows=83 width=178) + Group By Operator [GBY_7] (rows=166 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_6] @@ -2083,17 +2083,17 @@ Stage-0 Stage-1 Reducer 2 llap File Output Operator [FS_18] - 
Select Operator [SEL_17] (rows=250 width=178) + Select Operator [SEL_17] (rows=500 width=178) Output:["_col0","_col1"] - Filter Operator [FIL_16] (rows=250 width=179) + Filter Operator [FIL_16] (rows=500 width=179) predicate:_col4 is null - Merge Join Operator [MERGEJOIN_31] (rows=333 width=179) + Merge Join Operator [MERGEJOIN_31] (rows=832 width=179) Conds:GBY_4._col0, _col1=SEL_12._col0, _col1(Left Outer),Output:["_col0","_col1","_col4"] - <-Select Operator [SEL_12] (rows=83 width=182) + <-Select Operator [SEL_12] (rows=166 width=182) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_11] (rows=83 width=178) + Group By Operator [GBY_11] (rows=166 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 - <-Group By Operator [GBY_4] (rows=250 width=178) + <-Group By Operator [GBY_4] (rows=500 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_3] @@ -2389,9 +2389,9 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] llap SHUFFLE [RS_29] PartitionCols:_col2 - Filter Operator [FIL_39] (rows=41 width=186) + Filter Operator [FIL_39] (rows=166 width=186) predicate:_col2 is not null - Group By Operator [GBY_16] (rows=41 width=186) + Group By Operator [GBY_16] (rows=166 width=186) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 3 [SIMPLE_EDGE] llap SHUFFLE [RS_15] @@ -2474,14 +2474,14 @@ Stage-0 Stage-1 Reducer 3 llap File Output Operator [FS_21] - Merge Join Operator [MERGEJOIN_31] (rows=13 width=227) + Merge Join Operator [MERGEJOIN_31] (rows=26 width=227) Conds:RS_17._col1=RS_18._col0(Left Semi),Output:["_col0","_col1","_col2"] <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_17] PartitionCols:_col1 - Select Operator [SEL_6] (rows=13 width=227) + Select Operator [SEL_6] (rows=26 width=227) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_5] (rows=13 width=235) + Group By Operator [GBY_5] (rows=26 width=235) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0, KEY._col1 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_4] @@ -2629,11 +2629,11 @@ Stage-0 Stage-1 Reducer 3 llap File Output Operator [FS_24] - Select Operator [SEL_23] (rows=19 width=223) + Select Operator [SEL_23] (rows=21 width=223) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_22] (rows=19 width=234) + Filter Operator [FIL_22] (rows=21 width=236) predicate:CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END - Merge Join Operator [MERGEJOIN_45] (rows=38 width=234) + Merge Join Operator [MERGEJOIN_45] (rows=43 width=235) Conds:RS_19._col0, _col1=RS_20._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col8"] <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_19] @@ -2665,9 +2665,9 @@ Stage-0 <-Reducer 5 [ONE_TO_ONE_EDGE] llap FORWARD [RS_20] PartitionCols:_col0, _col1 - Select Operator [SEL_15] (rows=4 width=223) + Select Operator [SEL_15] (rows=8 width=223) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_14] (rows=4 width=219) + Group By Operator [GBY_14] (rows=8 width=219) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Map 1 [SIMPLE_EDGE] llap SHUFFLE [RS_13] diff --git a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out index 1dd9a0ff30..ebcc781bb8 100644 --- a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out +++ 
b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out @@ -501,24 +501,24 @@ Stage-0 Stage-1 Reducer 5 vectorized, llap File Output Operator [FS_172] - Group By Operator [GBY_171] (rows=39 width=268) + Group By Operator [GBY_171] (rows=156 width=268) Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 4 [SIMPLE_EDGE] <-Reducer 3 [CONTAINS] llap Reduce Output Operator [RS_130] PartitionCols:_col0, _col1, _col2 - Group By Operator [GBY_129] (rows=39 width=268) + Group By Operator [GBY_129] (rows=78 width=268) Output:["_col0","_col1","_col2"],keys:_col0, _col1, _col2 - Select Operator [SEL_127] (rows=39 width=268) + Select Operator [SEL_127] (rows=78 width=268) Output:["_col0","_col1","_col2"] - Merge Join Operator [MERGEJOIN_126] (rows=39 width=268) + Merge Join Operator [MERGEJOIN_126] (rows=78 width=268) Conds:RS_22._col3=RS_170._col0(Inner),Output:["_col1","_col2","_col4"] <-Reducer 11 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_170] PartitionCols:_col0 - Select Operator [SEL_169] (rows=262 width=91) + Select Operator [SEL_169] (rows=525 width=91) Output:["_col0"] - Group By Operator [GBY_168] (rows=262 width=178) + Group By Operator [GBY_168] (rows=525 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 10 [SIMPLE_EDGE] <-Map 12 [CONTAINS] vectorized, llap @@ -569,18 +569,18 @@ Stage-0 <-Reducer 7 [CONTAINS] llap Reduce Output Operator [RS_135] PartitionCols:_col0, _col1, _col2 - Group By Operator [GBY_134] (rows=39 width=268) + Group By Operator [GBY_134] (rows=78 width=268) Output:["_col0","_col1","_col2"],keys:_col0, _col1, _col2 - Select Operator [SEL_132] (rows=39 width=268) + Select Operator [SEL_132] (rows=78 width=268) Output:["_col0","_col1","_col2"] - Merge Join Operator [MERGEJOIN_131] (rows=39 width=268) + Merge Join Operator [MERGEJOIN_131] (rows=78 width=268) Conds:RS_48._col3=RS_175._col0(Inner),Output:["_col1","_col2","_col4"] <-Reducer 15 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_175] PartitionCols:_col0 - Select Operator [SEL_174] (rows=262 width=91) + Select Operator [SEL_174] (rows=525 width=91) Output:["_col0"] - Group By Operator [GBY_173] (rows=262 width=178) + Group By Operator [GBY_173] (rows=525 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 14 [SIMPLE_EDGE] <-Map 13 [CONTAINS] vectorized, llap @@ -683,38 +683,38 @@ Stage-0 Stage-1 Reducer 7 vectorized, llap File Output Operator [FS_334] - Group By Operator [GBY_333] (rows=49 width=177) + Group By Operator [GBY_333] (rows=384 width=177) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 6 [SIMPLE_EDGE] <-Reducer 5 [CONTAINS] vectorized, llap Reduce Output Operator [RS_332] PartitionCols:_col0, _col1 - Group By Operator [GBY_331] (rows=49 width=177) + Group By Operator [GBY_331] (rows=200 width=177) Output:["_col0","_col1"],keys:_col0, _col1 - Group By Operator [GBY_330] (rows=43 width=177) + Group By Operator [GBY_330] (rows=208 width=177) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 4 [SIMPLE_EDGE] <-Reducer 10 [CONTAINS] llap Reduce Output Operator [RS_249] PartitionCols:_col0, _col1 - Group By Operator [GBY_248] (rows=43 width=177) + Group By Operator [GBY_248] (rows=104 width=177) Output:["_col0","_col1"],keys:_col0, _col1 - Select Operator [SEL_246] (rows=48 width=177) + Select Operator [SEL_246] (rows=130 width=177) Output:["_col0","_col1"] - Merge Join Operator [MERGEJOIN_245] (rows=48 width=177) + Merge Join Operator [MERGEJOIN_245] (rows=130 width=177) 
Conds:RS_58._col3=RS_340._col0(Inner),Output:["_col1","_col2"] <-Reducer 20 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_340] PartitionCols:_col0 - Select Operator [SEL_339] (rows=381 width=91) + Select Operator [SEL_339] (rows=1025 width=91) Output:["_col0"] - Group By Operator [GBY_338] (rows=381 width=178) + Group By Operator [GBY_338] (rows=1025 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 19 [SIMPLE_EDGE] <-Map 22 [CONTAINS] vectorized, llap Reduce Output Operator [RS_364] PartitionCols:_col0, _col1 - Group By Operator [GBY_363] (rows=381 width=178) + Group By Operator [GBY_363] (rows=512 width=178) Output:["_col0","_col1"],keys:_col1, _col0 Select Operator [SEL_362] (rows=500 width=178) Output:["_col0","_col1"] @@ -725,11 +725,11 @@ Stage-0 <-Reducer 18 [CONTAINS] vectorized, llap Reduce Output Operator [RS_356] PartitionCols:_col0, _col1 - Group By Operator [GBY_355] (rows=381 width=178) + Group By Operator [GBY_355] (rows=512 width=178) Output:["_col0","_col1"],keys:_col1, _col0 - Select Operator [SEL_354] (rows=262 width=178) + Select Operator [SEL_354] (rows=525 width=178) Output:["_col0","_col1"] - Group By Operator [GBY_353] (rows=262 width=178) + Group By Operator [GBY_353] (rows=525 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 17 [SIMPLE_EDGE] <-Map 16 [CONTAINS] vectorized, llap @@ -780,11 +780,11 @@ Stage-0 <-Reducer 3 [CONTAINS] llap Reduce Output Operator [RS_235] PartitionCols:_col0, _col1 - Group By Operator [GBY_234] (rows=43 width=177) + Group By Operator [GBY_234] (rows=104 width=177) Output:["_col0","_col1"],keys:_col0, _col1 - Select Operator [SEL_232] (rows=39 width=177) + Select Operator [SEL_232] (rows=78 width=177) Output:["_col0","_col1"] - Merge Join Operator [MERGEJOIN_231] (rows=39 width=177) + Merge Join Operator [MERGEJOIN_231] (rows=78 width=177) Conds:RS_22._col3=RS_329._col0(Inner),Output:["_col1","_col2"] <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_22] @@ -802,9 +802,9 @@ Stage-0 <-Reducer 14 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_329] PartitionCols:_col0 - Select Operator [SEL_328] (rows=262 width=91) + Select Operator [SEL_328] (rows=525 width=91) Output:["_col0"] - Group By Operator [GBY_327] (rows=262 width=178) + Group By Operator [GBY_327] (rows=525 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 13 [SIMPLE_EDGE] <-Map 12 [CONTAINS] vectorized, llap @@ -832,11 +832,11 @@ Stage-0 <-Reducer 8 [CONTAINS] llap Reduce Output Operator [RS_244] PartitionCols:_col0, _col1 - Group By Operator [GBY_243] (rows=49 width=177) + Group By Operator [GBY_243] (rows=200 width=177) Output:["_col0","_col1"],keys:_col0, _col1 - Select Operator [SEL_241] (rows=55 width=177) + Select Operator [SEL_241] (rows=193 width=177) Output:["_col0","_col1"] - Merge Join Operator [MERGEJOIN_240] (rows=55 width=177) + Merge Join Operator [MERGEJOIN_240] (rows=193 width=177) Conds:RS_111._col3=RS_337._col0(Inner),Output:["_col1","_col2"] <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_111] @@ -845,15 +845,15 @@ Stage-0 <-Reducer 29 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_337] PartitionCols:_col0 - Select Operator [SEL_336] (rows=440 width=91) + Select Operator [SEL_336] (rows=1525 width=91) Output:["_col0"] - Group By Operator [GBY_335] (rows=440 width=178) + Group By Operator [GBY_335] (rows=1525 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 28 [SIMPLE_EDGE] <-Map 32 [CONTAINS] vectorized, llap Reduce Output Operator [RS_388] PartitionCols:_col0, _col1 - Group By Operator [GBY_387] 
(rows=440 width=178) + Group By Operator [GBY_387] (rows=762 width=178) Output:["_col0","_col1"],keys:_col1, _col0 Select Operator [SEL_386] (rows=500 width=178) Output:["_col0","_col1"] @@ -864,17 +864,17 @@ Stage-0 <-Reducer 27 [CONTAINS] vectorized, llap Reduce Output Operator [RS_376] PartitionCols:_col0, _col1 - Group By Operator [GBY_375] (rows=440 width=178) + Group By Operator [GBY_375] (rows=762 width=178) Output:["_col0","_col1"],keys:_col1, _col0 - Select Operator [SEL_374] (rows=381 width=178) + Select Operator [SEL_374] (rows=1025 width=178) Output:["_col0","_col1"] - Group By Operator [GBY_373] (rows=381 width=178) + Group By Operator [GBY_373] (rows=1025 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 26 [SIMPLE_EDGE] <-Map 31 [CONTAINS] vectorized, llap Reduce Output Operator [RS_384] PartitionCols:_col0, _col1 - Group By Operator [GBY_383] (rows=381 width=178) + Group By Operator [GBY_383] (rows=512 width=178) Output:["_col0","_col1"],keys:_col1, _col0 Select Operator [SEL_382] (rows=500 width=178) Output:["_col0","_col1"] @@ -885,11 +885,11 @@ Stage-0 <-Reducer 25 [CONTAINS] vectorized, llap Reduce Output Operator [RS_372] PartitionCols:_col0, _col1 - Group By Operator [GBY_371] (rows=381 width=178) + Group By Operator [GBY_371] (rows=512 width=178) Output:["_col0","_col1"],keys:_col1, _col0 - Select Operator [SEL_370] (rows=262 width=178) + Select Operator [SEL_370] (rows=525 width=178) Output:["_col0","_col1"] - Group By Operator [GBY_369] (rows=262 width=178) + Group By Operator [GBY_369] (rows=525 width=178) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Union 24 [SIMPLE_EDGE] <-Map 23 [CONTAINS] vectorized, llap diff --git a/ql/src/test/results/clientpositive/llap/explainuser_4.q.out b/ql/src/test/results/clientpositive/llap/explainuser_4.q.out index 8de83793ac..fdb57c9c05 100644 --- a/ql/src/test/results/clientpositive/llap/explainuser_4.q.out +++ b/ql/src/test/results/clientpositive/llap/explainuser_4.q.out @@ -192,11 +192,11 @@ Stage-0 Stage-1 Reducer 4 vectorized, llap File Output Operator [FS_41] - Select Operator [SEL_40] (rows=509 width=10) + Select Operator [SEL_40] (rows=510 width=10) Output:["_col0","_col1"] <-Reducer 3 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_39] - Group By Operator [GBY_38] (rows=509 width=10) + Group By Operator [GBY_38] (rows=510 width=10) Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0 <-Reducer 2 [SIMPLE_EDGE] llap SHUFFLE [RS_11] diff --git a/ql/src/test/results/clientpositive/llap/intersect_all.q.out b/ql/src/test/results/clientpositive/llap/intersect_all.q.out index dbb77d1abc..f9bf8377af 100644 --- a/ql/src/test/results/clientpositive/llap/intersect_all.q.out +++ b/ql/src/test/results/clientpositive/llap/intersect_all.q.out @@ -213,18 +213,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort 
order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 4 Execution mode: llap @@ -234,7 +234,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col3 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE @@ -264,18 +264,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Union 3 Vertex: Union 3 @@ -918,18 +918,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -939,18 +939,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, 
_col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 4 Execution mode: llap @@ -960,7 +960,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 388000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col3 = 4L) (type: boolean) Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE @@ -990,18 +990,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -1011,18 +1011,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 194000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out b/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out index 604c7bbd63..a7830e961d 100644 
--- a/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out +++ b/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out @@ -211,18 +211,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -232,7 +232,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 186 Basic stats: COMPLETE Column stats: COMPLETE @@ -255,18 +255,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 @@ -718,18 +718,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: 
_col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -739,18 +739,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -760,7 +760,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 372000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 4L) (type: boolean) Statistics: Num rows: 1 Data size: 186 Basic stats: COMPLETE Column stats: COMPLETE @@ -783,18 +783,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -804,18 +804,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 
Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/llap/intersect_merge.q.out b/ql/src/test/results/clientpositive/llap/intersect_merge.q.out index b19fd2c4ec..f55ac4e622 100644 --- a/ql/src/test/results/clientpositive/llap/intersect_merge.q.out +++ b/ql/src/test/results/clientpositive/llap/intersect_merge.q.out @@ -184,18 +184,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 12 Execution mode: vectorized, llap @@ -211,12 +211,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -232,12 +232,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -247,7 +247,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: 
COMPLETE Filter Operator predicate: (_col2 = 5L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -270,18 +270,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -297,12 +297,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 @@ -439,18 +439,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -466,12 +466,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: 
_col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -481,7 +481,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 4L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -504,18 +504,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -531,12 +531,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 @@ -697,18 +697,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 12 Execution mode: vectorized, llap @@ -724,12 +724,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE 
Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -745,12 +745,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -760,7 +760,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 5L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -783,18 +783,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -810,12 +810,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 @@ -976,18 +976,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 12 Execution mode: vectorized, llap @@ -1003,12 +1003,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -1024,12 +1024,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -1039,7 +1039,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 5L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -1062,18 +1062,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -1089,12 +1089,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 @@ -1255,18 +1255,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 12 Execution mode: vectorized, llap @@ -1282,12 +1282,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 2 Execution mode: vectorized, llap @@ -1303,12 +1303,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -1318,7 +1318,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 5L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -1341,18 +1341,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -1368,12 +1368,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 @@ -1492,12 +1492,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -1507,7 +1507,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 3L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -1530,18 +1530,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: 
_col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 8 Execution mode: vectorized, llap @@ -1557,12 +1557,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 @@ -1681,12 +1681,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 4 Execution mode: llap @@ -1696,7 +1696,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col3 = 3L) (type: boolean) Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE @@ -1726,18 +1726,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 8 Execution mode: 
vectorized, llap @@ -1753,12 +1753,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Union 3 Vertex: Union 3 @@ -1912,12 +1912,12 @@ STAGE PLANS: keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Reducer 7 Execution mode: vectorized, llap @@ -1927,7 +1927,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col2 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -1961,18 +1961,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col2) keys: _col0 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint) Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out index a84a7b3db3..f662e4c752 100644 --- a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out +++ b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out @@ -546,7 +546,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 110092 Basic stats: COMPLETE Column stats: COMPLETE 
Group By Operator aggregations: count(_col1) keys: _col0 (type: tinyint) @@ -648,7 +648,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 110092 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: tinyint) diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out index 8d5848bcd3..c51f54b0d9 100644 --- a/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out +++ b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out @@ -588,7 +588,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 110092 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: tinyint) @@ -704,7 +704,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 110092 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: tinyint) diff --git a/ql/src/test/results/clientpositive/llap/mrr.q.out b/ql/src/test/results/clientpositive/llap/mrr.q.out index a8aceea293..d6bde919c8 100644 --- a/ql/src/test/results/clientpositive/llap/mrr.q.out +++ b/ql/src/test/results/clientpositive/llap/mrr.q.out @@ -485,7 +485,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: string) @@ -918,7 +918,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: string) diff --git a/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out index 133d8af9e9..420ab59c93 100644 --- a/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out +++ b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out @@ -551,7 +551,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 110092 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: tinyint) @@ -654,7 +654,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 
Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 110092 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: tinyint) diff --git a/ql/src/test/results/clientpositive/llap/parallel.q.out b/ql/src/test/results/clientpositive/llap/parallel.q.out index 692bb8ca74..cd5ed93241 100644 --- a/ql/src/test/results/clientpositive/llap/parallel.q.out +++ b/ql/src/test/results/clientpositive/llap/parallel.q.out @@ -77,17 +77,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: Forward - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: complete diff --git a/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out b/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out index 91a450accf..b5e2523676 100644 --- a/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out +++ b/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out @@ -77,17 +77,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: Forward - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: complete diff --git a/ql/src/test/results/clientpositive/llap/ptf.q.out b/ql/src/test/results/clientpositive/llap/ptf.q.out index b719f73566..59c6d315e1 100644 --- a/ql/src/test/results/clientpositive/llap/ptf.q.out +++ b/ql/src/test/results/clientpositive/llap/ptf.q.out @@ -964,7 +964,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -999,14 +999,14 @@ STAGE PLANS: window function: 
GenericUDAFLagEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col2 (type: int), (_col2 - lag_window_2) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 13 Data size: 3107 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6214 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3107 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6214 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2699,10 +2699,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2840,11 +2840,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), round(_col2, 2) (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -2858,12 +2858,12 @@ STAGE PLANS: output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: double) Reducer 3 Execution mode: llap @@ -2871,7 +2871,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: 
COMPLETE PTF Operator Function definitions: Input definition @@ -2891,14 +2891,14 @@ STAGE PLANS: name: sum window function: GenericUDAFSumDouble window frame: ROWS PRECEDING(2)~CURRENT - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: double), round(sum_window_0, 2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out index 8d04800040..bf127c2b88 100644 --- a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out +++ b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out @@ -152,18 +152,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col1), count(DISTINCT _col2) keys: _col0 (type: int), _col1 (type: int), _col2 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: @@ -253,18 +253,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col2), count(DISTINCT _col1) keys: _col0 (type: int), _col2 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column 
stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: @@ -354,18 +354,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col1), count(DISTINCT _col2) keys: _col0 (type: int), _col1 (type: int), _col2 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: @@ -455,18 +455,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col2), count(DISTINCT _col1) keys: _col0 (type: int), _col2 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out index 54dc0f7b8f..0a7ac061af 100644 --- a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out +++ b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out @@ -358,14 +358,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 19 Data size: 3534 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 38 Data size: 7068 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col2 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 19 Data size: 1805 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 38 Data size: 3610 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 19 Data size: 1805 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 38 Data size: 3610 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -525,11 +525,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string) mode: complete @@ -597,7 +597,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: string) diff --git a/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out index 17601acc2d..e0fc9b558f 100644 --- a/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out +++ b/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out @@ -46,10 +46,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -743,10 +743,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -997,10 +997,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 262 Data size: 46636 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 525 Data size: 93450 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 262 Data size: 46636 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 525 Data size: 93450 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2524,10 +2524,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: 
string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3221,10 +3221,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3475,10 +3475,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 262 Data size: 46636 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 525 Data size: 93450 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 262 Data size: 46636 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 525 Data size: 93450 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/sharedworkext.q.out b/ql/src/test/results/clientpositive/llap/sharedworkext.q.out index e3488151bf..51a5d76f47 100644 --- a/ql/src/test/results/clientpositive/llap/sharedworkext.q.out +++ b/ql/src/test/results/clientpositive/llap/sharedworkext.q.out @@ -117,16 +117,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Reducer 4 Execution mode: llap Reduce Operator Tree: @@ -137,10 +137,10 @@ STAGE PLANS: 0 _col0 (type: string) 1 _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 493 Data size: 42891 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1980 Data size: 172260 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: 
Num rows: 493 Data size: 42891 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1980 Data size: 172260 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -152,16 +152,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Stage: Stage-0 Fetch Operator @@ -625,28 +625,28 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string) outputColumnNames: _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: 0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: 0 (type: int) - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: 0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: 0 (type: int) - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE Reducer 4 Execution mode: vectorized, llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey1 (type: string) outputColumnNames: _col1 - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -667,19 +667,19 @@ STAGE PLANS: window function: GenericUDAFRankEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: rank_window_0 is not null (type: boolean) - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: rank_window_0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 395 Data size: 1580 Basic stats: COMPLETE Column 
stats: COMPLETE + Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 395 Data size: 1580 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Execution mode: llap Reduce Operator Tree: @@ -690,10 +690,10 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 395 Data size: 1580 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 395 Data size: 1580 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -704,7 +704,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey1 (type: string) outputColumnNames: _col1 - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -725,19 +725,19 @@ STAGE PLANS: window function: GenericUDAFRankEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: rank_window_0 is not null (type: boolean) - Statistics: Num rows: 395 Data size: 34365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: rank_window_0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 395 Data size: 1580 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 395 Data size: 1580 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out b/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out index cd178cff4c..1840b42d4d 100644 --- a/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out +++ b/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out @@ -804,7 +804,7 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -1107,7 +1107,7 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: 
Num rows: 2 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -1410,7 +1410,7 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -1713,7 +1713,7 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/subquery_in.q.out b/ql/src/test/results/clientpositive/llap/subquery_in.q.out index 9cfc960bf0..eca9f95537 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_in.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_in.q.out @@ -990,14 +990,14 @@ STAGE PLANS: 1 _col0 (type: string) outputColumnNames: _col0, _col1, _col3 residual filter predicates: {(_col3 > _col1)} - Statistics: Num rows: 65 Data size: 17485 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 131 Data size: 35239 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 65 Data size: 11570 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 131 Data size: 23318 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 65 Data size: 11570 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 131 Data size: 23318 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1009,17 +1009,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string) Stage: Stage-0 @@ -1510,10 +1510,10 @@ STAGE PLANS: 0 _col4 (type: string), _col5 (type: int) 1 _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 6 Data size: 3714 Basic 
stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 14 Data size: 8666 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 3714 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 14 Data size: 8666 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1525,17 +1525,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2808 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int) - Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out b/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out index 3839696882..4cf73ecd46 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out @@ -665,15 +665,15 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: _col2 is not null (type: boolean) - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col2 (type: bigint) sort order: + Map-reduce partition columns: _col2 (type: bigint) - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string), _col1 (type: string) Reducer 5 Execution mode: llap @@ -917,10 +917,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: _col2 is not null (type: boolean) - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE Map Join Operator condition map: Left Semi Join 0 to 1 @@ -1070,10 +1070,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 197 Data size: 
36642 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: _col2 is not null (type: boolean) - Statistics: Num rows: 197 Data size: 36642 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Map Join Operator condition map: Left Semi Join 0 to 1 diff --git a/ql/src/test/results/clientpositive/llap/subquery_multi.q.out b/ql/src/test/results/clientpositive/llap/subquery_multi.q.out index 7b00d69754..27231a1f72 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_multi.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_multi.q.out @@ -1859,16 +1859,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 7 Data size: 1372 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5096 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 7 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 7 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5200 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Reducer 2 Execution mode: llap @@ -1914,17 +1914,17 @@ STAGE PLANS: 0 _col3 (type: string), _col4 (type: string) 1 _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 - Statistics: Num rows: 16 Data size: 3891 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 5720 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: CASE WHEN ((_col10 = 0L)) THEN (true) WHEN (_col10 is null) THEN (true) WHEN (_col14 is not null) THEN (false) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 2860 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 2860 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 2860 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2362,16 +2362,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, 
_col1 - Statistics: Num rows: 7 Data size: 1372 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5096 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 7 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 7 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5200 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Reducer 2 Execution mode: llap @@ -2417,17 +2417,17 @@ STAGE PLANS: 0 _col3 (type: string), _col4 (type: string) 1 _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 - Statistics: Num rows: 16 Data size: 5484 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 5720 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: CASE WHEN ((_col10 = 0L)) THEN (true) WHEN (_col10 is null) THEN (true) WHEN (_col14 is not null) THEN (false) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 8 Data size: 2742 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 2860 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 8 Data size: 2742 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 2860 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 2742 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 2860 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2686,17 +2686,17 @@ STAGE PLANS: 0 _col3 (type: string), _col5 (type: int) 1 _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 - Statistics: Num rows: 16 Data size: 3891 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 28 Data size: 3203 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: CASE WHEN ((_col10 = 0L)) THEN (true) WHEN (_col10 is null) THEN (true) WHEN (_col14 is not null) THEN (false) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1601 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) 
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1601 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 14 Data size: 1601 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2723,16 +2723,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2808 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2912 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int) - Statistics: Num rows: 13 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2912 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -3364,15 +3364,15 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 197 Data size: 36642 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: _col2 is not null (type: boolean) - Statistics: Num rows: 197 Data size: 36642 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col2 (type: bigint) sort order: + Map-reduce partition columns: _col2 (type: bigint) - Statistics: Num rows: 197 Data size: 36642 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string), _col1 (type: string) Reducer 4 Execution mode: llap @@ -4037,10 +4037,10 @@ STAGE PLANS: 0 _col5 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 - Statistics: Num rows: 27 Data size: 16721 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 28 Data size: 17344 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 27 Data size: 16721 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 28 Data size: 17344 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: boolean) Reducer 3 Execution mode: llap @@ -4052,21 +4052,21 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, 
_col12 - Statistics: Num rows: 27 Data size: 17153 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 28 Data size: 17792 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: bigint), _col12 (type: bigint), _col10 (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 - Statistics: Num rows: 27 Data size: 17153 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 28 Data size: 17792 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((_col0 = 3) or CASE WHEN ((_col9 = 0L)) THEN (true) WHEN (_col12 is not null) THEN (false) WHEN (_col5 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (true) END) (type: boolean) - Statistics: Num rows: 14 Data size: 8898 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 9533 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 14 Data size: 8666 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 9285 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 14 Data size: 8666 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 9285 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -4078,16 +4078,16 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), true (type: boolean) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) Reducer 6 Execution mode: vectorized, llap diff --git a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out index 29d8bbfb48..b55b38cb2b 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out @@ -402,17 +402,17 @@ STAGE PLANS: 0 _col0 (type: string), _col1 (type: string) 1 _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col8 - Statistics: Num rows: 38 Data size: 8914 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 43 Data size: 10129 Basic stats: COMPLETE 
Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 19 Data size: 4457 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 21 Data size: 4963 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 19 Data size: 4237 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 21 Data size: 4683 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 19 Data size: 4237 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 21 Data size: 4683 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -531,16 +531,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 876 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1752 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4 Data size: 892 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1784 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 4 Data size: 892 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1784 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -1738,17 +1738,17 @@ STAGE PLANS: 0 _col4 (type: string), _col5 (type: int) 1 _col1 (type: string), _col0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 - Statistics: Num rows: 64 Data size: 40340 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 89 Data size: 56315 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col10 = 0L)) THEN (true) WHEN (_col10 is null) THEN (true) WHEN (_col14 is not null) THEN (false) WHEN (_col5 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 32 Data size: 20180 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 44 Data size: 27848 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 32 Data size: 19808 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 44 Data size: 27236 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 32 Data size: 19808 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 44 Data size: 27236 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1775,16 +1775,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2808 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: int), _col0 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2912 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col0 (type: int) sort order: ++ Map-reduce partition columns: _col1 (type: string), _col0 (type: int) - Statistics: Num rows: 13 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2912 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -2393,12 +2393,12 @@ STAGE PLANS: 0 _col0 (type: string), _col1 (type: int) 1 _col1 (type: string), _col3 (type: int) outputColumnNames: _col1, _col3, _col4, _col7 - Statistics: Num rows: 48 Data size: 660 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 72 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col3 = 0L)) THEN (true) WHEN (_col3 is null) THEN (true) WHEN (_col7 is not null) THEN (false) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 24 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 36 Data size: 628 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 24 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 36 Data size: 628 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -2445,16 +2445,16 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1625 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3354 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3354 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string), _col2 (type: boolean) Reducer 8 Execution mode: llap @@ -2466,12 +2466,12 @@ STAGE PLANS: 0 _col0 (type: int) 1 (_col0 + 100) (type: int) outputColumnNames: _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3354 Basic stats: COMPLETE Column 
stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col3 (type: int) sort order: ++ Map-reduce partition columns: _col1 (type: string), _col3 (type: int) - Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3354 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -2824,17 +2824,17 @@ STAGE PLANS: 0 _col1 (type: string), _col5 (type: int) 1 _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 - Statistics: Num rows: 40 Data size: 25032 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 50 Data size: 31422 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col10 = 0L)) THEN (true) WHEN (_col10 is null) THEN (true) WHEN (_col14 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 20 Data size: 12524 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 15719 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 20 Data size: 12380 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 15475 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 12380 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 15475 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2905,16 +2905,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 13 Data size: 1625 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 774 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int) - Statistics: Num rows: 6 Data size: 774 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -3041,12 +3041,12 @@ STAGE PLANS: 0 _col0 (type: int), _col5 (type: int) 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12 - Statistics: Num rows: 39 Data size: 24365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 53 Data size: 33255 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: 
string), _col5 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string), _col5 (type: int) - Statistics: Num rows: 39 Data size: 24365 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 53 Data size: 33255 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: bigint), _col12 (type: bigint) Reducer 3 Execution mode: llap @@ -3058,17 +3058,17 @@ STAGE PLANS: 0 _col0 (type: int), _col1 (type: string), _col5 (type: int) 1 _col1 (type: int), _col0 (type: string), _col2 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col16 - Statistics: Num rows: 59 Data size: 37149 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 108 Data size: 68404 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col11 = 0L)) THEN (true) WHEN (_col11 is null) THEN (true) WHEN (_col16 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col12 < _col11)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 29 Data size: 18267 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 54 Data size: 34210 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 29 Data size: 17951 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 54 Data size: 33426 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 29 Data size: 17951 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 54 Data size: 33426 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3081,12 +3081,12 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 312 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 624 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 13 Data size: 312 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 624 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 6 Execution mode: vectorized, llap @@ -3095,16 +3095,16 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3354 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: int), _col2 (type: int), true (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 1729 
Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3458 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: int), _col0 (type: string), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col1 (type: int), _col0 (type: string), _col2 (type: int) - Statistics: Num rows: 13 Data size: 1729 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 3458 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col3 (type: boolean) Stage: Stage-0 @@ -3232,17 +3232,17 @@ STAGE PLANS: 0 UDFToDouble(_col1) (type: double), _col2 (type: string) 1 _col0 (type: double), _col1 (type: string) outputColumnNames: _col0, _col1, _col4, _col5, _col8 - Statistics: Num rows: 58 Data size: 13682 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 83 Data size: 19807 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 29 Data size: 6849 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 41 Data size: 9793 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 29 Data size: 3509 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 41 Data size: 4961 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 29 Data size: 3509 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 41 Data size: 4961 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3269,16 +3269,16 @@ STAGE PLANS: keys: KEY._col0 (type: double), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2600 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: double), _col1 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2704 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: double), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: double), _col1 (type: string) - Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2704 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -3468,17 +3468,17 @@ STAGE PLANS: 0 _col1 (type: string), _col2 (type: int) 1 _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1, _col4, _col5, _col8 - Statistics: Num rows: 46 Data size: 10734 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 72 Data size: 17104 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE 
(true) END (type: boolean) - Statistics: Num rows: 23 Data size: 5375 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 36 Data size: 8560 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 23 Data size: 2783 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 36 Data size: 4356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 23 Data size: 2783 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 36 Data size: 4356 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3549,16 +3549,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2808 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2912 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int) - Statistics: Num rows: 8 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2912 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -4015,14 +4015,14 @@ STAGE PLANS: 0 _col0 (type: string), _col1 (type: string) 1 _col1 (type: string), _col0 (type: string) outputColumnNames: _col0, _col1, _col3, _col4, _col7 - Statistics: Num rows: 1623 Data size: 309794 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2352 Data size: 454136 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col3 = 0L)) THEN (true) WHEN (_col3 is null) THEN (true) WHEN (_col7 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 811 Data size: 154810 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1176 Data size: 227076 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 811 Data size: 154810 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1176 Data size: 227076 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: _col0 (type: string) @@ -4093,16 +4093,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 68750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 137500 Basic stats: COMPLETE 
Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col0 (type: string) sort order: ++ Map-reduce partition columns: _col1 (type: string), _col0 (type: string) - Statistics: Num rows: 250 Data size: 68750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 137500 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Reducer 9 Execution mode: vectorized, llap @@ -5470,12 +5470,12 @@ STAGE PLANS: 0 _col0 (type: string), _col1 (type: int) 1 _col1 (type: string), _col3 (type: int) outputColumnNames: _col1, _col3, _col4, _col7 - Statistics: Num rows: 53 Data size: 780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 82 Data size: 1476 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col3 = 0L)) THEN (true) WHEN (_col3 is null) THEN (true) WHEN (_col7 is not null) THEN (false) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 26 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 41 Data size: 748 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 26 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 41 Data size: 748 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -5522,16 +5522,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 2548 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5096 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: UDFToDouble(_col0) (type: double) sort order: + Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 13 Data size: 2600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5200 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string), _col2 (type: boolean) Reducer 8 Execution mode: llap @@ -5543,12 +5543,12 @@ STAGE PLANS: 0 UDFToDouble(_col0) (type: double) 1 UDFToDouble((_col0 + 100)) (type: double) outputColumnNames: _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col3 (type: int) sort order: ++ Map-reduce partition columns: _col1 (type: string), _col3 (type: int) - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2600 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -6155,17 +6155,17 @@ STAGE PLANS: 0 _col0 (type: int), _col1 (type: int) 1 _col1 (type: int), _col0 (type: int) outputColumnNames: _col0, _col1, _col3, _col4, _col7 - Statistics: Num rows: 3 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 6 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col3 = 0L)) THEN (true) WHEN (_col3 is null) THEN (true) WHEN (_col7 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -6192,16 +6192,16 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: int), _col0 (type: int), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: int), _col0 (type: int) sort order: ++ Map-reduce partition columns: _col1 (type: int), _col0 (type: int) - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -6616,17 +6616,17 @@ STAGE PLANS: 1 _col0 (type: int) 2 _col2 (type: int) outputColumnNames: _col0, _col1, _col3, _col4, _col6 - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 6 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col3 = 0L)) THEN (true) WHEN (_col3 is null) THEN (true) WHEN (_col6 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -6639,12 +6639,12 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 
Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint), _col2 (type: bigint) Reducer 4 Execution mode: vectorized, llap @@ -6653,16 +6653,16 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), true (type: boolean) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) Reducer 5 Execution mode: llap @@ -6674,12 +6674,12 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col1, _col2 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col2 (type: int) sort order: + Map-reduce partition columns: _col2 (type: int) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) Reducer 7 Execution mode: vectorized, llap @@ -6688,12 +6688,12 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Stage: Stage-0 Fetch Operator @@ -6790,12 +6790,12 @@ STAGE PLANS: 0 _col1 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1, _col3, _col4 - Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col3 (type: bigint), _col4 (type: bigint) Reducer 3 Execution mode: llap @@ -6807,17 +6807,17 @@ STAGE PLANS: 0 _col0 (type: int), _col1 (type: int) 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col0, _col3, _col4, _col7 - Statistics: Num rows: 4 Data size: 88 
Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 6 Data size: 132 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col3 = 0L)) THEN (true) WHEN (_col3 is null) THEN (true) WHEN (_col7 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 2 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 68 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -6830,12 +6830,12 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint), _col2 (type: bigint) Reducer 5 Execution mode: vectorized, llap @@ -6844,16 +6844,16 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: int), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Stage: Stage-0 @@ -7315,17 +7315,17 @@ STAGE PLANS: 1 _col0 (type: string) outputColumnNames: _col0, _col1, _col3, _col4, _col6, _col7 residual filter predicates: {(_col1 > _col6)} - Statistics: Num rows: 1145 Data size: 236851 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1383 Data size: 305633 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col3 = 0L)) THEN (true) WHEN (_col3 is null) THEN (true) WHEN (_col7 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 572 Data size: 118384 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 691 Data size: 152775 Basic stats: COMPLETE 
Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 572 Data size: 101816 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 691 Data size: 122998 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 572 Data size: 101816 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 691 Data size: 122998 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -7380,16 +7380,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 83 Data size: 15106 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30212 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 83 Data size: 15106 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30212 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string), _col2 (type: boolean) Reducer 8 Execution mode: vectorized, llap diff --git a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out index c43ad9188b..3b2dbf5917 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out @@ -2403,17 +2403,17 @@ STAGE PLANS: 0 _col0 (type: int), _col5 (type: int) 1 _col2 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 39 Data size: 24309 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 53 Data size: 33143 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN (_col10 is null) THEN ((UDFToLong(_col5) <> 0)) ELSE ((UDFToLong(_col5) <> _col9)) END (type: boolean) - Statistics: Num rows: 19 Data size: 11845 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 16262 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 19 Data size: 11761 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 16094 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 19 Data size: 11761 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 16094 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2426,16 +2426,16 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 416 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: bigint), true (type: boolean), _col0 (type: int), _col1 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 520 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col2 (type: int), _col3 (type: int) sort order: ++ Map-reduce partition columns: _col2 (type: int), _col3 (type: int) - Statistics: Num rows: 13 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 520 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: boolean) Stage: Stage-0 @@ -5670,10 +5670,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) Reducer 5 Execution mode: llap @@ -5686,14 +5686,14 @@ STAGE PLANS: 1 outputColumnNames: _col0, _col1, _col2, _col3 residual filter predicates: {(_col2 > _col3)} - Statistics: Num rows: 13 Data size: 2522 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10670 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2418 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10230 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 2418 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10230 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/subquery_select.q.out b/ql/src/test/results/clientpositive/llap/subquery_select.q.out index 0435530467..31ae407d83 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_select.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_select.q.out @@ -4429,7 +4429,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 2808 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: int) diff --git a/ql/src/test/results/clientpositive/llap/subquery_views.q.out 
b/ql/src/test/results/clientpositive/llap/subquery_views.q.out index aa50b468c3..003426accd 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_views.q.out @@ -243,19 +243,19 @@ STAGE PLANS: 1 _col0 (type: string), _col1 (type: string) 2 _col3 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col4, _col5, _col8 - Statistics: Num rows: 27 Data size: 5346 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 112 Data size: 22176 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 56 Data size: 11088 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 2314 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 56 Data size: 9968 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 13 Data size: 2314 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 56 Data size: 9968 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string) Reducer 3 Execution mode: llap @@ -267,10 +267,10 @@ STAGE PLANS: 0 _col0 (type: string) 1 _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 2314 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 56 Data size: 9968 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 2314 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 56 Data size: 9968 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -283,12 +283,12 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 27 Data size: 5238 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10670 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 27 Data size: 5238 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10670 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reducer 5 Execution mode: vectorized, llap @@ -297,16 +297,16 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), true (type: boolean) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 27 Data size: 4914 
Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10010 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 27 Data size: 4914 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10010 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string), _col2 (type: boolean) Reducer 6 Execution mode: llap @@ -318,18 +318,18 @@ STAGE PLANS: 0 _col0 (type: string) 1 _col0 (type: string) outputColumnNames: _col1, _col2, _col3 - Statistics: Num rows: 27 Data size: 4914 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10010 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col3 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col3 (type: string), _col1 (type: string) - Statistics: Num rows: 27 Data size: 4914 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10010 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Reduce Output Operator key expressions: _col3 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col3 (type: string), _col1 (type: string) - Statistics: Num rows: 27 Data size: 4914 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10010 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: boolean) Reducer 7 Execution mode: llap @@ -343,24 +343,24 @@ STAGE PLANS: 1 _col0 (type: string), _col1 (type: string) 2 _col3 (type: string), _col1 (type: string) outputColumnNames: _col0, _col4, _col5, _col8 - Statistics: Num rows: 27 Data size: 2889 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 112 Data size: 11984 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) - Statistics: Num rows: 13 Data size: 1391 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 56 Data size: 5992 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 13 Data size: 1131 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 56 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 13 Data size: 1131 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 13 Data size: 1131 Basic stats: COMPLETE Column stats: COMPLETE Reducer 8 Execution mode: vectorized, llap Reduce Operator Tree: @@ -369,12 +369,12 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 27 Data size: 5238 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 
10670 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 27 Data size: 5238 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 55 Data size: 10670 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: bigint), _col3 (type: bigint) Stage: Stage-0 diff --git a/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out b/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out index 2ac8400576..23c81fb0af 100644 --- a/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out +++ b/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out @@ -308,14 +308,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 11936 Data size: 2220096 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 14658 Data size: 2726388 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 11936 Data size: 1133920 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 14658 Data size: 1392510 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 11936 Data size: 1133920 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 14658 Data size: 1392510 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/tez_union2.q.out b/ql/src/test/results/clientpositive/llap/tez_union2.q.out index ef0d4bd71a..9c0f100888 100644 --- a/ql/src/test/results/clientpositive/llap/tez_union2.q.out +++ b/ql/src/test/results/clientpositive/llap/tez_union2.q.out @@ -122,10 +122,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 408 Data size: 72624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 816 Data size: 145248 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 408 Data size: 72624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 816 Data size: 145248 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -137,11 +137,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string) mode: complete diff --git a/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out b/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out 
index 05d259b0d9..ec14fbf098 100644 --- a/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out @@ -3991,18 +3991,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 68000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 136272 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT substr(_col1, 5)) keys: _col0 (type: string), substr(_col1, 5) (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 125 Data size: 35000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 70000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 125 Data size: 35000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 70000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT substr(_col1, 5)) keys: _col0 (type: string), _col1 (type: string) @@ -4042,14 +4042,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 125 Data size: 12000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), CAST( _col1 AS STRING) (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 125 Data size: 34000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 68000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 125 Data size: 34000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 68000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -4058,7 +4058,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 125 Data size: 34000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 68000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash diff --git a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out index 93791ac8e0..385f548636 100644 --- a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out +++ b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out @@ -640,10 +640,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE table: input 
format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1099,18 +1099,18 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 138276 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT substr(_col1, 5)) keys: _col0 (type: string), substr(_col1, 5) (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 125 Data size: 35500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 71000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 125 Data size: 35500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 71000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT substr(_col1, 5)) keys: _col0 (type: string), _col1 (type: string) @@ -1150,14 +1150,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 125 Data size: 12500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), CAST( _col1 AS STRING) (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 125 Data size: 34500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 125 Data size: 34500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1166,7 +1166,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 125 Data size: 34500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -2029,10 +2029,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 138276 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 138276 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2041,7 +2041,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num 
rows: 501 Data size: 138276 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -2054,10 +2054,10 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 115000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 230460 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 115000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 230460 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2066,7 +2066,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 - Statistics: Num rows: 250 Data size: 115000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 230460 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(val1, 'hll'), compute_stats(val2, 'hll') mode: hash @@ -2929,7 +2929,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 69000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 138276 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) keys: _col0 (type: string) @@ -2964,10 +2964,10 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 115000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 230460 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 115000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 230460 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2976,7 +2976,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 - Statistics: Num rows: 250 Data size: 115000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 501 Data size: 230460 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(val1, 'hll'), compute_stats(val2, 'hll') mode: hash @@ -4685,10 +4685,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output 
format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -6845,7 +6845,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 408 Data size: 72624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 816 Data size: 145248 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: _col0 (type: string), _col1 (type: string) @@ -6871,11 +6871,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string) mode: complete @@ -8456,12 +8456,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized Map 5 Map Operator Tree: @@ -8515,17 +8515,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: @@ -8533,14 +8533,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 267000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 142500 Basic stats: COMPLETE Column 
stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 142500 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -8549,7 +8549,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 142500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -8582,17 +8582,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 8 Execution mode: vectorized, llap Reduce Operator Tree: @@ -8600,21 +8600,21 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Union 2 Vertex: Union 2 Union 7 @@ -8769,12 +8769,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output 
Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized Map 5 Map Operator Tree: @@ -8823,14 +8823,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 267000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 142500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 142500 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -8839,7 +8839,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 142500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -8871,17 +8871,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Union 2 Vertex: Union 2 Union 6 @@ -9345,12 +9345,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized Map 11 Map Operator Tree: @@ -9387,12 +9387,12 @@ STAGE PLANS: keys: _col0 
(type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized Map 7 Map Operator Tree: @@ -9423,21 +9423,21 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reducer 12 Execution mode: vectorized Reduce Operator Tree: @@ -9446,17 +9446,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: vectorized, llap Reduce Operator Tree: @@ -9464,17 +9464,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 267000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Execution mode: llap Reduce Operator Tree: @@ -9482,14 +9482,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 190000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 190000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -9498,7 +9498,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 190000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -9531,17 +9531,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) - Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE Union 2 Vertex: Union 2 Union 4 @@ -11569,10 +11569,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE 
+ Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -11581,7 +11581,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -11801,10 +11801,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -11813,7 +11813,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -12061,12 +12061,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized Map 4 Map Operator Tree: @@ -12134,10 +12134,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 30 Data size: 5340 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 30 Data size: 5340 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -12149,17 +12149,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: 
_col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Union 2 Vertex: Union 2 Union 6 @@ -12336,12 +12336,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Reducer 4 Execution mode: vectorized, llap Reduce Operator Tree: @@ -12349,10 +12349,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 30 Data size: 5340 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 30 Data size: 5340 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -12364,17 +12364,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 15 Data size: 2670 Basic stats: COMPLETE Column stats: COMPLETE Union 3 Vertex: Union 3 Union 7 diff --git a/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out b/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out index 5337820ab0..ad40635f78 100644 --- a/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out +++ b/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out @@ -74,9 +74,9 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic 
stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -236,10 +236,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 3536 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 7072 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3536 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 7072 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -248,7 +248,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value - Statistics: Num rows: 13 Data size: 3536 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 7072 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') mode: hash @@ -439,12 +439,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: no inputs Reducer 3 @@ -454,17 +454,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Execution mode: vectorized, llap Reduce Operator Tree: @@ -472,10 +472,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 267000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE 
Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 267000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1103,11 +1103,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 3536 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 7072 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 13 Data size: 3536 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 7072 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: _col0 (type: string) @@ -1273,12 +1273,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 19 Data size: 5168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 6800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 19 Data size: 5168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 6800 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: no inputs Reducer 2 @@ -1310,17 +1310,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 3536 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 7072 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 19 Data size: 5168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 6800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 19 Data size: 5168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 6800 Basic stats: COMPLETE Column stats: COMPLETE Reducer 6 Execution mode: vectorized, llap Reduce Operator Tree: @@ -1328,11 +1328,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 19 Data size: 5168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 51 Data size: 13872 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 19 Data size: 5168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 51 Data size: 13872 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: _col0 (type: string) @@ -1488,12 +1488,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output 
Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: no inputs Map 13 @@ -1509,12 +1509,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: no inputs Map 14 @@ -1530,12 +1530,12 @@ STAGE PLANS: keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1250 Data size: 222500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1250 Data size: 222500 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: no inputs Reducer 10 @@ -1560,17 +1560,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 750 Data size: 133500 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Execution mode: vectorized, llap Reduce Operator Tree: @@ -1578,17 +1578,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 267000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: 
string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 7 Execution mode: vectorized, llap Reduce Operator Tree: @@ -1596,17 +1596,17 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1250 Data size: 222500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1250 Data size: 222500 Basic stats: COMPLETE Column stats: COMPLETE Reducer 9 Execution mode: vectorized, llap Reduce Operator Tree: @@ -1614,9 +1614,9 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2500 Data size: 445000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2500 Data size: 445000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -1803,12 +1803,12 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 22576 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 167 Data size: 45424 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 83 Data size: 22576 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 167 Data size: 45424 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string) Reducer 5 Execution mode: llap @@ -1820,10 +1820,10 @@ STAGE PLANS: 0 _col0 (type: string) 1 _col0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 83 Data size: 45152 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 336 Data size: 182784 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 83 Data size: 45152 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 336 Data size: 182784 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1857,12 +1857,12 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 22576 Basic stats: 
COMPLETE Column stats: COMPLETE + Statistics: Num rows: 167 Data size: 45424 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 83 Data size: 22576 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 167 Data size: 45424 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string) Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out index 52b17cf36b..20b4a9faa8 100644 --- a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out @@ -1097,13 +1097,13 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1228,13 +1228,13 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out index 1ba0ab6920..8ddc19774d 100644 --- a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out @@ -173,7 +173,7 @@ STAGE PLANS: keys: KEY._col0 (type: char(20)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 251 Data size: 26857 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(20)) sort order: + @@ -181,7 +181,7 @@ STAGE PLANS: className: VectorReduceSinkObjectHashOperator native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 251 Data size: 26857 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 value expressions: _col1 (type: 
bigint), _col2 (type: bigint) Reducer 3 @@ -200,7 +200,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2] - Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 251 Data size: 26857 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 5 Limit Vectorization: @@ -382,7 +382,7 @@ STAGE PLANS: keys: KEY._col0 (type: char(20)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 251 Data size: 26857 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(20)) sort order: - @@ -390,7 +390,7 @@ STAGE PLANS: className: VectorReduceSinkObjectHashOperator native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 251 Data size: 26857 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 value expressions: _col1 (type: bigint), _col2 (type: bigint) Reducer 3 @@ -409,7 +409,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2] - Statistics: Num rows: 250 Data size: 26750 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 251 Data size: 26857 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 5 Limit Vectorization: diff --git a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out index e72e398e4b..01d3725101 100644 --- a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out @@ -194,7 +194,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 97812 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 195620 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: tinyint) outputColumnNames: _col0, _col1 @@ -202,13 +202,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0] - Statistics: Num rows: 1000 Data size: 97812 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 195620 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 1000 Data size: 97812 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 195620 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out index 3ea544e4b8..c07011eb08 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out @@ -199,7 +199,7 @@ STAGE 
PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1000 Data size: 105812 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 211620 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 @@ -207,13 +207,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0, 2] - Statistics: Num rows: 1000 Data size: 105812 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 211620 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 1000 Data size: 105812 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 211620 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out index 7bee405977..135c531e7b 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out @@ -783,7 +783,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint) mode: final outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 9 Data size: 1674 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE pruneGroupingSetId: true Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint) @@ -792,13 +792,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2] - Statistics: Num rows: 9 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 9 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out index 3696cad941..407fc2b22b 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out @@ -1062,7 +1062,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 500 Data size: 66000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 132000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: int), _col0 (type: int), _col2 (type: int), _col3 (type: double), _col4 (type: decimal(38,18)) 
outputColumnNames: _col0, _col1, _col2, _col3, _col4 @@ -1070,7 +1070,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0, 2, 3, 4] - Statistics: Num rows: 500 Data size: 66000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 132000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col2), count(_col2), sum(_col3), count(_col3), sum(_col4), count(_col4) Group By Vectorization: diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out index fcde000739..66cb4463a3 100644 --- a/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out @@ -576,7 +576,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: decimal(15,2)), _col0 (type: decimal(15,2)) sort order: ++ @@ -588,7 +588,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:decimal(15,2) valueColumns: 2:decimal(25,2) - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: decimal(25,2)) Reducer 3 Execution mode: vectorized, llap @@ -613,7 +613,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0, 2] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -646,7 +646,7 @@ STAGE PLANS: outputTypes: [decimal(35,2), decimal(15,2), decimal(15,2), decimal(25,2)] partitionExpressions: [col 0:decimal(15,2)] streamingColumns: [] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: decimal(35,2)) outputColumnNames: _col0 @@ -654,13 +654,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [3] - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -889,7 +889,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2)) mode: mergepartial outputColumnNames: 
_col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: decimal(15,2)), _col0 (type: decimal(15,2)) sort order: ++ @@ -901,7 +901,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:decimal(15,2) valueColumns: 2:decimal(25,2) - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: decimal(25,2)) Reducer 4 Execution mode: vectorized, llap @@ -926,7 +926,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0, 2] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -959,7 +959,7 @@ STAGE PLANS: outputTypes: [decimal(35,2), decimal(15,2), decimal(15,2), decimal(25,2)] partitionExpressions: [col 0:decimal(15,2)] streamingColumns: [] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: decimal(35,2)) outputColumnNames: _col0 @@ -967,13 +967,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [3] - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1206,7 +1206,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: decimal(15,2)), _col0 (type: decimal(15,2)) sort order: ++ @@ -1218,7 +1218,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:decimal(15,2) valueColumns: 2:decimal(25,2) - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: decimal(25,2)) Reducer 4 Execution mode: vectorized, llap @@ -1243,7 +1243,7 @@ STAGE PLANS: className: VectorSelectOperator native: true 
projectedOutputColumnNums: [1, 0, 2] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -1276,7 +1276,7 @@ STAGE PLANS: outputTypes: [decimal(35,2), decimal(15,2), decimal(15,2), decimal(25,2)] partitionExpressions: [col 0:decimal(15,2)] streamingColumns: [] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: decimal(35,2)) outputColumnNames: _col0 @@ -1284,13 +1284,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [3] - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1508,7 +1508,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 928 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -1528,14 +1528,14 @@ STAGE PLANS: name: sum window function: GenericUDAFSumDouble window frame: RANGE PRECEDING(MAX)~CURRENT - Statistics: Num rows: 2 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 928 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: double) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1906,7 +1906,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(7,2)), KEY._col1 (type: decimal(7,2)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: decimal(7,2)), _col0 (type: decimal(7,2)) sort order: ++ @@ -1918,7 +1918,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:decimal(7,2) valueColumns: 2:decimal(17,2) - Statistics: Num rows: 2 Data size: 672 Basic stats: 
COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: decimal(17,2)) Reducer 3 Execution mode: vectorized, llap @@ -1943,7 +1943,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0, 2] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -1976,7 +1976,7 @@ STAGE PLANS: outputTypes: [decimal(27,2), decimal(7,2), decimal(7,2), decimal(17,2)] partitionExpressions: [col 0:decimal(7,2)] streamingColumns: [] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: decimal(27,2)) outputColumnNames: _col0 @@ -1984,13 +1984,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [3] - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2219,7 +2219,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(7,2)), KEY._col1 (type: decimal(7,2)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: decimal(7,2)), _col0 (type: decimal(7,2)) sort order: ++ @@ -2231,7 +2231,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:decimal(7,2) valueColumns: 2:decimal(17,2) - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: decimal(17,2)) Reducer 4 Execution mode: vectorized, llap @@ -2256,7 +2256,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0, 2] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -2289,7 +2289,7 @@ STAGE PLANS: outputTypes: [decimal(27,2), decimal(7,2), decimal(7,2), decimal(17,2)] partitionExpressions: [col 0:decimal(7,2)] streamingColumns: [] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: decimal(27,2)) outputColumnNames: _col0 @@ -2297,13 +2297,13 @@ 
STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [3] - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2536,7 +2536,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(7,2)), KEY._col1 (type: decimal(7,2)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: decimal(7,2)), _col0 (type: decimal(7,2)) sort order: ++ @@ -2548,7 +2548,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:decimal(7,2) valueColumns: 2:decimal(17,2) - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: decimal(17,2)) Reducer 4 Execution mode: vectorized, llap @@ -2573,7 +2573,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0, 2] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -2606,7 +2606,7 @@ STAGE PLANS: outputTypes: [decimal(27,2), decimal(7,2), decimal(7,2), decimal(17,2)] partitionExpressions: [col 0:decimal(7,2)] streamingColumns: [] - Statistics: Num rows: 2 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: decimal(27,2)) outputColumnNames: _col0 @@ -2614,13 +2614,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [3] - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -2838,7 +2838,7 @@ STAGE PLANS: keys: KEY._col0 (type: decimal(7,2)), KEY._col1 (type: decimal(7,2)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 928 Basic stats: COMPLETE Column 
stats: COMPLETE PTF Operator Function definitions: Input definition @@ -2858,14 +2858,14 @@ STAGE PLANS: name: sum window function: GenericUDAFSumDouble window frame: RANGE PRECEDING(MAX)~CURRENT - Statistics: Num rows: 2 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 928 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: sum_window_0 (type: double) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing.q.out index 8e8c445af7..e4a225ee13 100644 --- a/ql/src/test/results/clientpositive/llap/vector_windowing.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_windowing.q.out @@ -340,7 +340,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col0 (type: string) sort order: ++ @@ -352,7 +352,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:string valueColumns: 2:int, 3:double - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: int), _col3 (type: double) Reducer 3 Execution mode: llap @@ -365,7 +365,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -400,14 +400,14 @@ STAGE PLANS: window function: GenericUDAFLagEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int), _col3 (type: double), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col2 (type: int), (_col2 - lag_window_2) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 
6422 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -594,7 +594,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col0 (type: string) sort order: ++ @@ -606,7 +606,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:string valueColumns: 2:int, 3:double - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: int), _col3 (type: double) Reducer 3 Execution mode: llap @@ -619,7 +619,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -654,14 +654,14 @@ STAGE PLANS: window function: GenericUDAFLagEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int), _col3 (type: double), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col2 (type: int), (_col2 - lag_window_2) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -4152,7 +4152,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: double) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col0 (type: 
string) sort order: ++ @@ -4164,7 +4164,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:string valueColumns: 2:int, 3:double, 4:double, 5:double - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: int), _col3 (type: double), _col4 (type: double), _col5 (type: double) Reducer 3 Execution mode: llap @@ -4177,7 +4177,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -4203,14 +4203,14 @@ STAGE PLANS: name: avg window function: GenericUDAFAverageEvaluatorDouble window frame: ROWS PRECEDING(2)~FOLLOWING(2) - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int), _col3 (type: double), round(sum_window_0, 2) (type: double), _col4 (type: double), _col5 (type: double), round(avg_window_1, 2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 13 Data size: 3419 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6838 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3419 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6838 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -4843,7 +4843,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -4863,15 +4863,15 @@ STAGE PLANS: name: sum window function: GenericUDAFSumDouble window frame: RANGE PRECEDING(MAX)~CURRENT - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), round(_col2, 2) (type: double), round(sum_window_0, 2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ - Statistics: Num 
rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: double), _col3 (type: double) Reducer 3 Execution mode: vectorized, llap @@ -4896,13 +4896,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3] - Statistics: Num rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -6440,7 +6440,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: string), _col0 (type: string) sort order: ++ @@ -6452,7 +6452,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 1:string valueColumns: 2:int, 3:double - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: int), _col3 (type: double) Reducer 3 Execution mode: llap @@ -6465,7 +6465,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -6500,14 +6500,14 @@ STAGE PLANS: window function: GenericUDAFLagEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int), _col3 (type: double), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col2 (type: int), (_col2 - lag_window_2) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3211 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6422 Basic stats: COMPLETE Column stats: COMPLETE table: input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -8098,13 +8098,13 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: bigint) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out index 5943548a6c..22c4b321b4 100644 --- a/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out @@ -603,7 +603,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 10 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col2 (type: double) sort order: +- @@ -615,7 +615,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 0:string valueColumns: 1:string, 3:double, 4:int, 5:double - Statistics: Num rows: 10 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3790 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string), _col3 (type: double), _col4 (type: int), _col5 (type: double) Reducer 3 Execution mode: vectorized, llap @@ -640,7 +640,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 2, 1, 3, 4, 5] - Statistics: Num rows: 10 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3790 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -674,7 +674,7 @@ STAGE PLANS: outputTypes: [int, string, string, double, double, int, double] partitionExpressions: [col 0:string] streamingColumns: [6] - Statistics: Num rows: 10 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3790 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: rank_window_0 (type: int), _col1 (type: string), _col3 (type: double), _col4 (type: int), _col5 (type: double) outputColumnNames: rank_window_0, _col1, _col3, _col4, _col5 @@ -682,7 +682,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [6, 2, 3, 4, 5] - Statistics: Num rows: 10 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3790 Basic stats: 
COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: lower(_col1) (type: string), _col3 (type: double) sort order: ++ @@ -695,7 +695,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 8:string valueColumns: 6:int, 2:string, 4:int, 5:double - Statistics: Num rows: 10 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 3790 Basic stats: COMPLETE Column stats: COMPLETE value expressions: rank_window_0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: double) Reducer 4 Execution mode: vectorized, llap @@ -720,7 +720,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [2, 3, 1, 4, 5] - Statistics: Num rows: 10 Data size: 1005 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 1925 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -754,7 +754,7 @@ STAGE PLANS: outputTypes: [int, int, string, double, int, double] partitionExpressions: [StringLower(col 3:string) -> 7:string] streamingColumns: [6] - Statistics: Num rows: 10 Data size: 1005 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 1925 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: dense_rank_window_1 (type: int), _col0 (type: int), _col5 (type: int), _col6 (type: double) outputColumnNames: dense_rank_window_1, _col0, _col5, _col6 @@ -762,7 +762,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [6, 2, 4, 5] - Statistics: Num rows: 10 Data size: 1005 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 1925 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col5 (type: int), _col6 (type: double) sort order: ++ @@ -774,7 +774,7 @@ STAGE PLANS: nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true partitionColumns: 4:int valueColumns: 6:int, 2:int - Statistics: Num rows: 10 Data size: 1005 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 1925 Basic stats: COMPLETE Column stats: COMPLETE value expressions: dense_rank_window_1 (type: int), _col0 (type: int) Reducer 5 Execution mode: llap @@ -787,7 +787,7 @@ STAGE PLANS: Select Operator expressions: VALUE._col0 (type: int), VALUE._col1 (type: int), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: double) outputColumnNames: _col0, _col1, _col6, _col7 - Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -808,14 +808,14 @@ STAGE PLANS: window function: GenericUDAFPercentRankEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: int), _col0 (type: int), 
percent_rank_window_2 (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 10 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 20 Data size: 320 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out index 3dc640a300..624261e00f 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out @@ -744,7 +744,7 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint), KEY._col1 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 110092 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(_col1) Group By Vectorization: @@ -991,7 +991,7 @@ STAGE PLANS: keys: KEY._col0 (type: double) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 4586 Data size: 64088 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4587 Data size: 64104 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col1 (type: bigint), _col0 (type: double) sort order: ++ @@ -1000,7 +1000,7 @@ STAGE PLANS: keyColumns: 1:bigint, 0:double native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 4586 Data size: 64088 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4587 Data size: 64104 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.3 Reducer 3 Execution mode: vectorized, llap @@ -1025,7 +1025,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1, 0] - Statistics: Num rows: 4586 Data size: 64088 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4587 Data size: 64104 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 20 Limit Vectorization: diff --git a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out index f929706757..d43480e89f 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out @@ -2296,7 +2296,7 @@ STAGE PLANS: keys: KEY._col0 (type: smallint) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1251 Data size: 83804 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 83872 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: smallint), (UDFToInteger(_col0) % -75) (type: int), power(((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) 
END), 0.5) (type: double), (-1.389 / CAST( _col0 AS decimal(5,0))) (type: decimal(10,9)), _col4 (type: bigint), (UDFToDouble((UDFToInteger(_col0) % -75)) / UDFToDouble(_col4)) (type: double), (- (UDFToInteger(_col0) % -75)) (type: int), ((_col5 - ((_col6 * _col6) / _col7)) / _col7) (type: double), (- (- (UDFToInteger(_col0) % -75))) (type: int), _col8 (type: bigint), (_col8 - -89010L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 @@ -2305,7 +2305,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [0, 9, 10, 16, 4, 18, 19, 17, 14, 8, 20] selectExpressions: LongColModuloLongScalar(col 0:int, val -75)(children: col 0:smallint) -> 9:int, FuncPowerDoubleToDouble(col 11:double)(children: DoubleColDivideLongColumn(col 10:double, col 14:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 11:double)(children: DoubleColDivideLongColumn(col 10:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 10:double) -> 11:double) -> 10:double, IfExprNullCondExpr(col 12:boolean, null, col 13:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 12:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 13:bigint) -> 14:bigint) -> 11:double) -> 10:double, DecimalScalarDivideDecimalColumn(val -1.389, col 15:decimal(5,0))(children: CastLongToDecimal(col 0:smallint) -> 15:decimal(5,0)) -> 16:decimal(10,9), DoubleColDivideDoubleColumn(col 11:double, col 17:double)(children: CastLongToDouble(col 14:int)(children: LongColModuloLongScalar(col 0:int, val -75)(children: col 0:smallint) -> 14:int) -> 11:double, CastLongToDouble(col 4:bigint) -> 17:double) -> 18:double, LongColUnaryMinus(col 14:int)(children: LongColModuloLongScalar(col 0:int, val -75)(children: col 0:smallint) -> 14:int) -> 19:int, DoubleColDivideLongColumn(col 11:double, col 7:bigint)(children: DoubleColSubtractDoubleColumn(col 5:double, col 17:double)(children: DoubleColDivideLongColumn(col 11:double, col 7:bigint)(children: DoubleColMultiplyDoubleColumn(col 6:double, col 6:double) -> 11:double) -> 17:double) -> 11:double) -> 17:double, LongColUnaryMinus(col 20:int)(children: LongColUnaryMinus(col 14:int)(children: LongColModuloLongScalar(col 0:int, val -75)(children: col 0:smallint) -> 14:int) -> 20:int) -> 14:int, LongColSubtractLongScalar(col 8:bigint, val -89010) -> 20:bigint - Statistics: Num rows: 1251 Data size: 218912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 219088 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: double), _col3 (type: decimal(10,9)), _col4 (type: bigint), _col5 (type: double), _col6 (type: int), _col7 (type: double), _col8 (type: int), _col9 (type: bigint), _col10 (type: bigint) sort order: +++++++++++ @@ -2313,7 +2313,7 @@ STAGE PLANS: className: VectorReduceSinkObjectHashOperator native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 1251 Data size: 218912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 219088 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 Reducer 3 Execution mode: vectorized, llap @@ -2331,7 +2331,7 @@ STAGE PLANS: className: VectorSelectOperator 
native: true projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - Statistics: Num rows: 1251 Data size: 218912 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 219088 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 20 Limit Vectorization: @@ -2578,7 +2578,7 @@ STAGE PLANS: keys: KEY._col0 (type: double) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 1251 Data size: 57520 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 57568 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: double), ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END) (type: double), (2563.58D * ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END)) (type: double), (- ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END)) (type: double), _col4 (type: bigint), ((2563.58D * ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END)) + -5638.15D) (type: double), ((- ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END)) * ((2563.58D * ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END)) + -5638.15D)) (type: double), _col5 (type: double), ((_col1 - ((_col2 * _col2) / _col3)) / _col3) (type: double), (_col0 - (- ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END))) (type: double), power(((_col1 - ((_col2 * _col2) / _col3)) / _col3), 0.5) (type: double), (_col0 + ((_col1 - ((_col2 * _col2) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END)) (type: double), (_col0 * 762.0D) (type: double), _col2 (type: double), (-863.257D % (_col0 * 762.0D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 @@ -2587,7 +2587,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [0, 7, 6, 11, 4, 17, 20, 5, 23, 26, 14, 29, 30, 2, 34] selectExpressions: DoubleColDivideLongColumn(col 6:double, col 10:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 7:double)(children: DoubleColDivideLongColumn(col 6:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 6:double) -> 7:double) -> 6:double, IfExprNullCondExpr(col 8:boolean, null, col 9:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 8:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 9:bigint) -> 10:bigint) -> 7:double, DoubleScalarMultiplyDoubleColumn(val 2563.58, col 11:double)(children: DoubleColDivideLongColumn(col 6:double, col 13:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 11:double)(children: DoubleColDivideLongColumn(col 6:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 6:double) -> 11:double) -> 6:double, IfExprNullCondExpr(col 10:boolean, null, col 12:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 10:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 12:bigint) -> 13:bigint) -> 11:double) -> 6:double, DoubleColUnaryMinus(col 14:double)(children: DoubleColDivideLongColumn(col 11:double, col 16:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 14:double)(children: 
DoubleColDivideLongColumn(col 11:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 11:double) -> 14:double) -> 11:double, IfExprNullCondExpr(col 13:boolean, null, col 15:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 13:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 15:bigint) -> 16:bigint) -> 14:double) -> 11:double, DoubleColAddDoubleScalar(col 14:double, val -5638.15)(children: DoubleScalarMultiplyDoubleColumn(val 2563.58, col 17:double)(children: DoubleColDivideLongColumn(col 14:double, col 19:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 17:double)(children: DoubleColDivideLongColumn(col 14:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 14:double) -> 17:double) -> 14:double, IfExprNullCondExpr(col 16:boolean, null, col 18:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 16:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 18:bigint) -> 19:bigint) -> 17:double) -> 14:double) -> 17:double, DoubleColMultiplyDoubleColumn(col 14:double, col 23:double)(children: DoubleColUnaryMinus(col 20:double)(children: DoubleColDivideLongColumn(col 14:double, col 22:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 20:double)(children: DoubleColDivideLongColumn(col 14:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 14:double) -> 20:double) -> 14:double, IfExprNullCondExpr(col 19:boolean, null, col 21:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 19:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 21:bigint) -> 22:bigint) -> 20:double) -> 14:double, DoubleColAddDoubleScalar(col 20:double, val -5638.15)(children: DoubleScalarMultiplyDoubleColumn(val 2563.58, col 23:double)(children: DoubleColDivideLongColumn(col 20:double, col 25:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 23:double)(children: DoubleColDivideLongColumn(col 20:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 20:double) -> 23:double) -> 20:double, IfExprNullCondExpr(col 22:boolean, null, col 24:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 22:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 24:bigint) -> 25:bigint) -> 23:double) -> 20:double) -> 23:double) -> 20:double, DoubleColDivideLongColumn(col 14:double, col 3:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 23:double)(children: DoubleColDivideLongColumn(col 14:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 14:double) -> 23:double) -> 14:double) -> 23:double, DoubleColSubtractDoubleColumn(col 0:double, col 14:double)(children: DoubleColUnaryMinus(col 26:double)(children: DoubleColDivideLongColumn(col 14:double, col 28:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 26:double)(children: DoubleColDivideLongColumn(col 14:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 14:double) -> 26:double) -> 14:double, IfExprNullCondExpr(col 25:boolean, null, col 27:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 25:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 27:bigint) -> 28:bigint) -> 26:double) -> 14:double) -> 26:double, FuncPowerDoubleToDouble(col 29:double)(children: DoubleColDivideLongColumn(col 14:double, col 3:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 
29:double)(children: DoubleColDivideLongColumn(col 14:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 14:double) -> 29:double) -> 14:double) -> 29:double) -> 14:double, DoubleColAddDoubleColumn(col 0:double, col 30:double)(children: DoubleColDivideLongColumn(col 29:double, col 32:bigint)(children: DoubleColSubtractDoubleColumn(col 1:double, col 30:double)(children: DoubleColDivideLongColumn(col 29:double, col 3:bigint)(children: DoubleColMultiplyDoubleColumn(col 2:double, col 2:double) -> 29:double) -> 30:double) -> 29:double, IfExprNullCondExpr(col 28:boolean, null, col 31:bigint)(children: LongColEqualLongScalar(col 3:bigint, val 1) -> 28:boolean, LongColSubtractLongScalar(col 3:bigint, val 1) -> 31:bigint) -> 32:bigint) -> 30:double) -> 29:double, DoubleColMultiplyDoubleScalar(col 0:double, val 762.0) -> 30:double, DoubleScalarModuloDoubleColumn(val -863.257, col 33:double)(children: DoubleColMultiplyDoubleScalar(col 0:double, val 762.0) -> 33:double) -> 34:double - Statistics: Num rows: 1251 Data size: 157600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 157728 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: double) sort order: + @@ -2595,7 +2595,7 @@ STAGE PLANS: className: VectorReduceSinkObjectHashOperator native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 1251 Data size: 157600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 157728 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double) Reducer 3 Execution mode: vectorized, llap @@ -2613,13 +2613,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13] - Statistics: Num rows: 1251 Data size: 157600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 157728 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 1251 Data size: 157600 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1252 Data size: 157728 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out index 56e81aa819..ef3a334b45 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out @@ -1398,7 +1398,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -1433,14 +1433,14 @@ STAGE PLANS: window function: GenericUDAFLagEvaluator window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col2 (type: int), (_col2 - lag_window_2) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 13 Data size: 3107 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6214 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3107 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6214 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3677,13 +3677,13 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 13 Data size: 2899 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -3867,11 +3867,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), round(_col2, 2) (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -3885,12 +3885,12 @@ STAGE PLANS: output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col2 (type: double) Reducer 3 Execution mode: llap @@ -3903,7 +3903,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), 
KEY.reducesinkkey1 (type: string), VALUE._col0 (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE PTF Operator Function definitions: Input definition @@ -3923,14 +3923,14 @@ STAGE PLANS: name: sum window function: GenericUDAFSumDouble window frame: ROWS PRECEDING(2)~CURRENT - Statistics: Num rows: 13 Data size: 2574 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5148 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: double), round(sum_window_0, 2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 2678 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 5356 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/windowing.q.out b/ql/src/test/results/clientpositive/llap/windowing.q.out index ffd21abb4c..fe897643d7 100644 --- a/ql/src/test/results/clientpositive/llap/windowing.q.out +++ b/ql/src/test/results/clientpositive/llap/windowing.q.out @@ -1895,10 +1895,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int), KEY._col3 (type: bigint) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 3003 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 26 Data size: 6006 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query11.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query11.q.out index 00b6bcbe1a..f5f2be255b 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query11.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query11.q.out @@ -192,15 +192,15 @@ Stage-0 File Output Operator [FS_349] Limit [LIM_348] (rows=100 width=85) Number of rows:100 - Select Operator [SEL_347] (rows=12248093 width=85) + Select Operator [SEL_347] (rows=29679601 width=85) Output:["_col0"] <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_89] - Select Operator [SEL_88] (rows=12248093 width=85) + Select Operator [SEL_88] (rows=29679601 width=85) Output:["_col0"] - Filter Operator [FIL_87] (rows=12248093 width=537) + Filter Operator [FIL_87] (rows=29679601 width=537) predicate:CASE WHEN (_col3 is not null) THEN (CASE WHEN (_col6) THEN (((_col1 / _col5) > (_col9 / _col3))) ELSE ((null > (_col9 / _col3))) END) ELSE (CASE WHEN (_col6) THEN (((_col1 / _col5) > null)) ELSE (null) END) END - Merge Join Operator [MERGEJOIN_283] (rows=24496186 width=537) + Merge Join Operator [MERGEJOIN_283] 
(rows=59359203 width=537) Conds:RS_84._col2=RS_346._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col8","_col9"] <-Reducer 20 [SIMPLE_EDGE] vectorized SHUFFLE [RS_346] @@ -260,18 +260,18 @@ Stage-0 <-Reducer 6 [ONE_TO_ONE_EDGE] FORWARD [RS_84] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_282] (rows=20485011 width=440) + Merge Join Operator [MERGEJOIN_282] (rows=49639315 width=440) Conds:RS_81._col2=RS_338._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6"] <-Reducer 16 [SIMPLE_EDGE] vectorized SHUFFLE [RS_338] PartitionCols:_col0 - Select Operator [SEL_337] (rows=17130654 width=216) + Select Operator [SEL_337] (rows=26666666 width=216) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_336] (rows=17130654 width=212) + Filter Operator [FIL_336] (rows=26666666 width=212) predicate:(_col7 > 0) - Select Operator [SEL_335] (rows=51391963 width=212) + Select Operator [SEL_335] (rows=80000000 width=212) Output:["_col0","_col7"] - Group By Operator [GBY_334] (rows=51391963 width=764) + Group By Operator [GBY_334] (rows=80000000 width=764) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6 <-Reducer 15 [SIMPLE_EDGE] SHUFFLE [RS_55] @@ -320,7 +320,7 @@ Stage-0 <-Reducer 5 [ONE_TO_ONE_EDGE] FORWARD [RS_81] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_281] (rows=31888273 width=324) + Merge Join Operator [MERGEJOIN_281] (rows=49639315 width=324) Conds:RS_318._col0=RS_328._col0(Inner),Output:["_col1","_col2","_col3"] <-Reducer 12 [SIMPLE_EDGE] vectorized SHUFFLE [RS_328] @@ -380,9 +380,9 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] vectorized SHUFFLE [RS_318] PartitionCols:_col0 - Select Operator [SEL_317] (rows=51391963 width=212) + Select Operator [SEL_317] (rows=80000000 width=212) Output:["_col0","_col1"] - Group By Operator [GBY_316] (rows=51391963 width=764) + Group By Operator [GBY_316] (rows=80000000 width=764) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6 <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_16] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query14.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query14.q.out index e8a6eaa464..5b353ce462 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query14.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query14.q.out @@ -327,29 +327,29 @@ Stage-0 Stage-1 Reducer 9 vectorized File Output Operator [FS_1703] - Limit [LIM_1702] (rows=7 width=192) + Limit [LIM_1702] (rows=8 width=193) Number of rows:100 - Select Operator [SEL_1701] (rows=7 width=192) + Select Operator [SEL_1701] (rows=8 width=193) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Reducer 8 [SIMPLE_EDGE] vectorized SHUFFLE [RS_1700] - Select Operator [SEL_1699] (rows=7 width=192) + Select Operator [SEL_1699] (rows=8 width=193) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Group By Operator [GBY_1698] (rows=7 width=200) + Group By Operator [GBY_1698] (rows=8 width=201) Output:["_col0","_col1","_col2","_col3","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4 <-Union 7 [SIMPLE_EDGE] <-Reducer 14 [CONTAINS] Reduce Output Operator [RS_1462] PartitionCols:_col0, _col1, _col2, _col3, _col4 - Group By Operator [GBY_1461] 
(rows=7 width=200) + Group By Operator [GBY_1461] (rows=15 width=199) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0L - Top N Key Operator [TNK_1460] (rows=3 width=221) + Top N Key Operator [TNK_1460] (rows=6 width=215) keys:_col0, _col1, _col2, _col3, 0L,sort order:+++++,top n:100 - Select Operator [SEL_1458] (rows=1 width=223) + Select Operator [SEL_1458] (rows=2 width=217) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Filter Operator [FIL_1457] (rows=1 width=244) + Filter Operator [FIL_1457] (rows=2 width=238) predicate:(_col5 > _col1) - Merge Join Operator [MERGEJOIN_1456] (rows=1 width=244) + Merge Join Operator [MERGEJOIN_1456] (rows=8 width=233) Conds:(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"] <-Reducer 13 [CUSTOM_SIMPLE_EDGE] PARTITION_ONLY_SHUFFLE [RS_371] @@ -594,7 +594,7 @@ Stage-0 Please refer to the previous Select Operator [SEL_1603] <-Reducer 62 [CUSTOM_SIMPLE_EDGE] vectorized PARTITION_ONLY_SHUFFLE [RS_1725] - Group By Operator [GBY_1724] (rows=1 width=132) + Group By Operator [GBY_1724] (rows=8 width=121) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 61 [SIMPLE_EDGE] SHUFFLE [RS_365] @@ -841,15 +841,15 @@ Stage-0 <-Reducer 19 [CONTAINS] Reduce Output Operator [RS_1475] PartitionCols:_col0, _col1, _col2, _col3, _col4 - Group By Operator [GBY_1474] (rows=7 width=200) + Group By Operator [GBY_1474] (rows=15 width=199) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0L - Top N Key Operator [TNK_1473] (rows=3 width=221) + Top N Key Operator [TNK_1473] (rows=6 width=215) keys:_col0, _col1, _col2, _col3, 0L,sort order:+++++,top n:100 - Select Operator [SEL_1471] (rows=1 width=219) + Select Operator [SEL_1471] (rows=2 width=213) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Filter Operator [FIL_1470] (rows=1 width=244) + Filter Operator [FIL_1470] (rows=2 width=238) predicate:(_col5 > _col1) - Merge Join Operator [MERGEJOIN_1469] (rows=1 width=244) + Merge Join Operator [MERGEJOIN_1469] (rows=8 width=233) Conds:(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"] <-Reducer 18 [CUSTOM_SIMPLE_EDGE] PARTITION_ONLY_SHUFFLE [RS_560] @@ -983,7 +983,7 @@ Stage-0 Please refer to the previous Select Operator [SEL_1807] <-Reducer 67 [CUSTOM_SIMPLE_EDGE] vectorized PARTITION_ONLY_SHUFFLE [RS_1747] - Group By Operator [GBY_1746] (rows=1 width=132) + Group By Operator [GBY_1746] (rows=8 width=121) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 66 [SIMPLE_EDGE] SHUFFLE [RS_554] @@ -1104,15 +1104,15 @@ Stage-0 <-Reducer 6 [CONTAINS] Reduce Output Operator [RS_1449] PartitionCols:_col0, _col1, _col2, _col3, _col4 - Group By Operator [GBY_1448] (rows=7 width=200) + Group By Operator [GBY_1448] (rows=15 width=199) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0L - Top N Key Operator [TNK_1447] (rows=3 width=221) + Top N Key Operator [TNK_1447] (rows=6 width=215) keys:_col0, _col1, _col2, _col3, 0L,sort order:+++++,top n:100 - Select Operator [SEL_1445] (rows=1 width=221) + Select Operator [SEL_1445] (rows=2 width=215) 
Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Filter Operator [FIL_1444] (rows=1 width=244) + Filter Operator [FIL_1444] (rows=2 width=238) predicate:(_col5 > _col1) - Merge Join Operator [MERGEJOIN_1443] (rows=1 width=244) + Merge Join Operator [MERGEJOIN_1443] (rows=8 width=233) Conds:(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"] <-Reducer 5 [CUSTOM_SIMPLE_EDGE] PARTITION_ONLY_SHUFFLE [RS_183] @@ -1246,7 +1246,7 @@ Stage-0 Please refer to the previous Select Operator [SEL_1793] <-Reducer 56 [CUSTOM_SIMPLE_EDGE] vectorized PARTITION_ONLY_SHUFFLE [RS_1697] - Group By Operator [GBY_1696] (rows=1 width=132) + Group By Operator [GBY_1696] (rows=8 width=121) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 55 [SIMPLE_EDGE] SHUFFLE [RS_177] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query33.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query33.q.out index 6d7c620dea..710f7d6fba 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query33.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query33.q.out @@ -195,13 +195,13 @@ Stage-0 Stage-1 Reducer 7 vectorized File Output Operator [FS_368] - Limit [LIM_367] (rows=59 width=115) + Limit [LIM_367] (rows=60 width=115) Number of rows:100 - Select Operator [SEL_366] (rows=59 width=115) + Select Operator [SEL_366] (rows=60 width=115) Output:["_col0","_col1"] <-Reducer 6 [SIMPLE_EDGE] vectorized SHUFFLE [RS_365] - Group By Operator [GBY_364] (rows=59 width=115) + Group By Operator [GBY_364] (rows=60 width=115) Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0 <-Union 5 [SIMPLE_EDGE] <-Reducer 11 [CONTAINS] vectorized diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query34.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query34.q.out index b717b285ef..2e4e5fe428 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query34.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query34.q.out @@ -110,9 +110,9 @@ Stage-0 PartitionCols:_col1 Filter Operator [FIL_131] (rows=6 width=12) predicate:_col2 BETWEEN 15 AND 20 - Select Operator [SEL_130] (rows=5521356 width=12) + Select Operator [SEL_130] (rows=479121995 width=12) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_129] (rows=5521356 width=12) + Group By Operator [GBY_129] (rows=479121995 width=12) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_25] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query4.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query4.q.out index 2f8ab17bf0..e001a4a93f 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query4.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query4.q.out @@ -274,15 +274,15 @@ Stage-0 File Output Operator [FS_550] Limit [LIM_549] (rows=100 width=85) Number of rows:100 - Select Operator [SEL_548] (rows=7323197 width=85) + Select Operator [SEL_548] (rows=17745586 width=85) Output:["_col0"] <-Reducer 9 [SIMPLE_EDGE] SHUFFLE [RS_135] - Select Operator [SEL_134] (rows=7323197 width=85) + Select Operator [SEL_134] (rows=17745586 width=85) Output:["_col0"] - Filter Operator [FIL_133] (rows=7323197 width=537) + Filter Operator [FIL_133] (rows=17745586 width=537) predicate:CASE WHEN (_col3 is not 
null) THEN (CASE WHEN (_col9) THEN (((_col11 / _col8) > (_col14 / _col3))) ELSE ((null > (_col14 / _col3))) END) ELSE (CASE WHEN (_col9) THEN (((_col11 / _col8) > null)) ELSE (null) END) END - Merge Join Operator [MERGEJOIN_466] (rows=14646395 width=537) + Merge Join Operator [MERGEJOIN_466] (rows=35491172 width=537) Conds:RS_130._col2=RS_547._col0(Inner),Output:["_col3","_col8","_col9","_col11","_col13","_col14"] <-Reducer 30 [SIMPLE_EDGE] vectorized SHUFFLE [RS_547] @@ -342,9 +342,9 @@ Stage-0 <-Reducer 8 [ONE_TO_ONE_EDGE] FORWARD [RS_130] PartitionCols:_col2 - Filter Operator [FIL_129] (rows=12248093 width=668) + Filter Operator [FIL_129] (rows=29679601 width=668) predicate:CASE WHEN (_col6) THEN (CASE WHEN (_col9) THEN (((_col11 / _col8) > (_col1 / _col5))) ELSE ((null > (_col1 / _col5))) END) ELSE (CASE WHEN (_col9) THEN (((_col11 / _col8) > null)) ELSE (null) END) END - Merge Join Operator [MERGEJOIN_465] (rows=24496186 width=668) + Merge Join Operator [MERGEJOIN_465] (rows=59359203 width=668) Conds:RS_126._col2=RS_541._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col8","_col9","_col11"] <-Reducer 26 [SIMPLE_EDGE] vectorized SHUFFLE [RS_541] @@ -396,7 +396,7 @@ Stage-0 <-Reducer 7 [ONE_TO_ONE_EDGE] FORWARD [RS_126] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_464] (rows=20485011 width=556) + Merge Join Operator [MERGEJOIN_464] (rows=49639315 width=556) Conds:RS_123._col2=RS_535._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col8","_col9"] <-Reducer 22 [SIMPLE_EDGE] vectorized SHUFFLE [RS_535] @@ -456,18 +456,18 @@ Stage-0 <-Reducer 6 [ONE_TO_ONE_EDGE] FORWARD [RS_123] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_463] (rows=20485011 width=440) + Merge Join Operator [MERGEJOIN_463] (rows=49639315 width=440) Conds:RS_120._col2=RS_527._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6"] <-Reducer 18 [SIMPLE_EDGE] vectorized SHUFFLE [RS_527] PartitionCols:_col0 - Select Operator [SEL_526] (rows=17130654 width=216) + Select Operator [SEL_526] (rows=26666666 width=216) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_525] (rows=17130654 width=212) + Filter Operator [FIL_525] (rows=26666666 width=212) predicate:(_col7 > 0) - Select Operator [SEL_524] (rows=51391963 width=212) + Select Operator [SEL_524] (rows=80000000 width=212) Output:["_col0","_col7"] - Group By Operator [GBY_523] (rows=51391963 width=764) + Group By Operator [GBY_523] (rows=80000000 width=764) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6 <-Reducer 17 [SIMPLE_EDGE] SHUFFLE [RS_55] @@ -516,7 +516,7 @@ Stage-0 <-Reducer 5 [ONE_TO_ONE_EDGE] FORWARD [RS_120] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_462] (rows=31888273 width=324) + Merge Join Operator [MERGEJOIN_462] (rows=49639315 width=324) Conds:RS_511._col0=RS_519._col0(Inner),Output:["_col1","_col2","_col3"] <-Reducer 14 [SIMPLE_EDGE] vectorized SHUFFLE [RS_519] @@ -576,9 +576,9 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] vectorized SHUFFLE [RS_511] PartitionCols:_col0 - Select Operator [SEL_510] (rows=51391963 width=212) + Select Operator [SEL_510] (rows=80000000 width=212) Output:["_col0","_col1"] - Group By Operator [GBY_509] (rows=51391963 width=764) + Group By Operator [GBY_509] (rows=80000000 width=764) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, 
KEY._col4, KEY._col5, KEY._col6 <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_16] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query49.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query49.q.out index 324eef20d0..b235c2f370 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query49.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query49.q.out @@ -302,21 +302,21 @@ Stage-0 File Output Operator [FS_310] Limit [LIM_309] (rows=100 width=215) Number of rows:100 - Select Operator [SEL_308] (rows=3418 width=215) + Select Operator [SEL_308] (rows=1714 width=215) Output:["_col0","_col1","_col2","_col3","_col4"] <-Reducer 10 [SIMPLE_EDGE] vectorized SHUFFLE [RS_307] - Select Operator [SEL_306] (rows=3418 width=215) + Select Operator [SEL_306] (rows=1714 width=215) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_305] (rows=3418 width=215) + Group By Operator [GBY_305] (rows=1714 width=215) Output:["_col0","_col1","_col2","_col3","_col4"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4 <-Union 9 [SIMPLE_EDGE] <-Reducer 24 [CONTAINS] vectorized Reduce Output Operator [RS_351] PartitionCols:_col0, _col1, _col2, _col3, _col4 - Group By Operator [GBY_350] (rows=3418 width=215) + Group By Operator [GBY_350] (rows=2856 width=215) Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2 - Top N Key Operator [TNK_349] (rows=3418 width=214) + Top N Key Operator [TNK_349] (rows=2856 width=214) keys:_col0, _col3, _col4, _col1, _col2,sort order:+++++,top n:100 Select Operator [SEL_348] (rows=1142 width=213) Output:["_col0","_col1","_col2","_col3","_col4"] @@ -395,13 +395,13 @@ Stage-0 <-Reducer 8 [CONTAINS] vectorized Reduce Output Operator [RS_304] PartitionCols:_col0, _col1, _col2, _col3, _col4 - Group By Operator [GBY_303] (rows=3418 width=215) + Group By Operator [GBY_303] (rows=2856 width=215) Output:["_col0","_col1","_col2","_col3","_col4"],keys:_col0, _col3, _col4, _col1, _col2 - Top N Key Operator [TNK_302] (rows=3418 width=214) + Top N Key Operator [TNK_302] (rows=2856 width=214) keys:_col0, _col3, _col4, _col1, _col2,sort order:+++++,top n:100 - Select Operator [SEL_301] (rows=2276 width=215) + Select Operator [SEL_301] (rows=1714 width=215) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_300] (rows=2276 width=215) + Group By Operator [GBY_300] (rows=1714 width=215) Output:["_col0","_col1","_col2","_col3","_col4"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4 <-Union 7 [SIMPLE_EDGE] <-Reducer 18 [CONTAINS] vectorized diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query5.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query5.q.out index 32b0e3ec2a..fd576c8705 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query5.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query5.q.out @@ -329,7 +329,7 @@ Stage-0 <-Reducer 13 [SIMPLE_EDGE] SHUFFLE [RS_45] PartitionCols:_col0 - Group By Operator [GBY_44] (rows=46000 width=548) + Group By Operator [GBY_44] (rows=2835758 width=548) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col8 Merge Join Operator [MERGEJOIN_219] (rows=34813117 width=535) Conds:RS_40._col0=RS_305._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col8"] @@ -398,7 +398,7 @@ Stage-0 <-Reducer 17 [SIMPLE_EDGE] SHUFFLE [RS_77] PartitionCols:_col0 - Group By Operator 
[GBY_76] (rows=84 width=548) + Group By Operator [GBY_76] (rows=3498 width=548) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col8 Merge Join Operator [MERGEJOIN_221] (rows=30966059 width=543) Conds:RS_72._col0=RS_312._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col8"] @@ -478,7 +478,7 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] SHUFFLE [RS_21] PartitionCols:_col0 - Group By Operator [GBY_20] (rows=1704 width=548) + Group By Operator [GBY_20] (rows=78090 width=548) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col8 Merge Join Operator [MERGEJOIN_217] (rows=64325014 width=376) Conds:RS_16._col0=RS_289._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col8"] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query70.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query70.q.out index 184d281df0..2df373a2a9 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query70.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query70.q.out @@ -103,22 +103,22 @@ Stage-0 File Output Operator [FS_168] Limit [LIM_167] (rows=100 width=492) Number of rows:100 - Select Operator [SEL_166] (rows=240 width=492) + Select Operator [SEL_166] (rows=720 width=492) Output:["_col0","_col1","_col2","_col3","_col4"] <-Reducer 6 [SIMPLE_EDGE] vectorized SHUFFLE [RS_165] - Select Operator [SEL_164] (rows=240 width=492) + Select Operator [SEL_164] (rows=720 width=492) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - PTF Operator [PTF_163] (rows=240 width=304) + PTF Operator [PTF_163] (rows=720 width=304) Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col2 DESC NULLS LAST","partition by:":"(grouping(_col3, 1) + grouping(_col3, 0)), CASE WHEN ((grouping(_col3, 0) = 0)) THEN (_col0) ELSE (CAST( null AS STRING)) END"}] - Select Operator [SEL_162] (rows=240 width=304) + Select Operator [SEL_162] (rows=720 width=304) Output:["_col0","_col1","_col2","_col3"] <-Reducer 5 [SIMPLE_EDGE] vectorized SHUFFLE [RS_161] PartitionCols:(grouping(_col3, 1) + grouping(_col3, 0)), CASE WHEN ((grouping(_col3, 0) = 0)) THEN (_col0) ELSE (CAST( null AS STRING)) END - Select Operator [SEL_160] (rows=240 width=304) + Select Operator [SEL_160] (rows=720 width=304) Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_159] (rows=240 width=304) + Group By Operator [GBY_159] (rows=720 width=304) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 4 [SIMPLE_EDGE] SHUFFLE [RS_49] @@ -148,7 +148,7 @@ Stage-0 <-Reducer 8 [SIMPLE_EDGE] SHUFFLE [RS_26] PartitionCols:_col0 - Group By Operator [GBY_25] (rows=1704 width=198) + Group By Operator [GBY_25] (rows=2989 width=198) Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col5 Merge Join Operator [MERGEJOIN_133] (rows=91197860 width=168) Conds:RS_21._col1=RS_151._col0(Inner),Output:["_col2","_col5"] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query73.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query73.q.out index 53345420af..63224568f8 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query73.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query73.q.out @@ -104,9 +104,9 @@ Stage-0 PartitionCols:_col1 Filter Operator [FIL_131] (rows=5 width=12) predicate:_col2 BETWEEN 1 AND 5 - 
Select Operator [SEL_130] (rows=788766 width=12) + Select Operator [SEL_130] (rows=215975848 width=12) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_129] (rows=788766 width=12) + Group By Operator [GBY_129] (rows=215975848 width=12) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_25] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out index 12ed5c8ecb..ee9d51b837 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out @@ -164,15 +164,15 @@ Stage-0 File Output Operator [FS_348] Limit [LIM_347] (rows=100 width=280) Number of rows:100 - Select Operator [SEL_346] (rows=12248093 width=280) + Select Operator [SEL_346] (rows=29679601 width=280) Output:["_col0","_col1","_col2"] <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_89] - Select Operator [SEL_88] (rows=12248093 width=280) + Select Operator [SEL_88] (rows=29679601 width=280) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_87] (rows=12248093 width=732) + Filter Operator [FIL_87] (rows=29679601 width=732) predicate:CASE WHEN (_col3 is not null) THEN (CASE WHEN (_col6) THEN (((_col1 / _col5) > (_col10 / _col3))) ELSE ((null > (_col10 / _col3))) END) ELSE (CASE WHEN (_col6) THEN (((_col1 / _col5) > null)) ELSE (null) END) END - Merge Join Operator [MERGEJOIN_283] (rows=24496186 width=732) + Merge Join Operator [MERGEJOIN_283] (rows=59359203 width=732) Conds:RS_84._col2=RS_345._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col7","_col8","_col9","_col10"] <-Reducer 20 [SIMPLE_EDGE] vectorized SHUFFLE [RS_345] @@ -230,18 +230,18 @@ Stage-0 <-Reducer 6 [ONE_TO_ONE_EDGE] FORWARD [RS_84] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_282] (rows=20485011 width=440) + Merge Join Operator [MERGEJOIN_282] (rows=49639315 width=440) Conds:RS_81._col2=RS_338._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6"] <-Reducer 16 [SIMPLE_EDGE] vectorized SHUFFLE [RS_338] PartitionCols:_col0 - Select Operator [SEL_337] (rows=17130654 width=216) + Select Operator [SEL_337] (rows=26666666 width=216) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_336] (rows=17130654 width=212) + Filter Operator [FIL_336] (rows=26666666 width=212) predicate:(_col3 > 0) - Select Operator [SEL_335] (rows=51391963 width=212) + Select Operator [SEL_335] (rows=80000000 width=212) Output:["_col0","_col3"] - Group By Operator [GBY_334] (rows=51391963 width=392) + Group By Operator [GBY_334] (rows=80000000 width=392) Output:["_col0","_col1","_col2","_col3"],aggregations:["max(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 15 [SIMPLE_EDGE] SHUFFLE [RS_55] @@ -290,7 +290,7 @@ Stage-0 <-Reducer 5 [ONE_TO_ONE_EDGE] FORWARD [RS_81] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_281] (rows=31888273 width=324) + Merge Join Operator [MERGEJOIN_281] (rows=49639315 width=324) Conds:RS_318._col0=RS_328._col0(Inner),Output:["_col1","_col2","_col3"] <-Reducer 12 [SIMPLE_EDGE] vectorized SHUFFLE [RS_328] @@ -350,9 +350,9 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] vectorized SHUFFLE [RS_318] PartitionCols:_col0 - Select Operator [SEL_317] (rows=51391963 width=212) + Select Operator [SEL_317] (rows=80000000 width=212) Output:["_col0","_col1"] - Group By Operator [GBY_316] (rows=51391963 width=392) + Group By Operator [GBY_316] (rows=80000000 width=392) 
Output:["_col0","_col1","_col2","_col3"],aggregations:["max(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_16] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query77.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query77.q.out index 78f7c28b7a..2e09c26c51 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query77.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query77.q.out @@ -264,15 +264,15 @@ Stage-0 Stage-1 Reducer 7 vectorized File Output Operator [FS_271] - Limit [LIM_270] (rows=24 width=437) + Limit [LIM_270] (rows=58 width=437) Number of rows:100 - Select Operator [SEL_269] (rows=24 width=437) + Select Operator [SEL_269] (rows=58 width=437) Output:["_col0","_col1","_col2","_col3","_col4"] <-Reducer 6 [SIMPLE_EDGE] vectorized SHUFFLE [RS_268] - Select Operator [SEL_267] (rows=24 width=437) + Select Operator [SEL_267] (rows=58 width=437) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_266] (rows=24 width=445) + Group By Operator [GBY_266] (rows=58 width=445) Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 5 [SIMPLE_EDGE] <-Reducer 14 [CONTAINS] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query78.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query78.q.out index ef4d6dbc21..7980131423 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query78.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query78.q.out @@ -158,22 +158,22 @@ Stage-0 File Output Operator [FS_238] Limit [LIM_237] (rows=100 width=484) Number of rows:100 - Select Operator [SEL_236] (rows=462576000046 width=483) + Select Operator [SEL_236] (rows=22243972511798 width=483) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"] <-Reducer 5 [SIMPLE_EDGE] SHUFFLE [RS_73] - Select Operator [SEL_72] (rows=462576000046 width=719) + Select Operator [SEL_72] (rows=22243972511798 width=719) Output:["_col0","_col1","_col6","_col7","_col8","_col9","_col10","_col11","_col12"] - Filter Operator [FIL_71] (rows=462576000046 width=702) + Filter Operator [FIL_71] (rows=22243972511798 width=703) predicate:CASE WHEN (_col11 is not null) THEN ((_col11 > 0L)) ELSE (false) END - Merge Join Operator [MERGEJOIN_191] (rows=925152000093 width=702) + Merge Join Operator [MERGEJOIN_191] (rows=44487945023596 width=703) Conds:RS_68._col1=RS_235._col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9","_col11","_col12","_col13"] <-Reducer 12 [SIMPLE_EDGE] vectorized SHUFFLE [RS_235] PartitionCols:_col0 - Select Operator [SEL_234] (rows=101592102 width=235) + Select Operator [SEL_234] (rows=286549727 width=235) Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_233] (rows=101592102 width=239) + Group By Operator [GBY_233] (rows=286549727 width=239) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1 <-Reducer 11 [SIMPLE_EDGE] SHUFFLE [RS_65] @@ -230,16 +230,16 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] SHUFFLE [RS_68] PartitionCols:_col1 - Filter Operator [FIL_45] (rows=4580227799 width=471) + Filter Operator [FIL_45] (rows=78336887605 width=471) predicate:CASE WHEN (_col7 is not null) THEN ((_col7 > 0L)) ELSE (false) END - Merge Join Operator 
[MERGEJOIN_190] (rows=9160455599 width=471) + Merge Join Operator [MERGEJOIN_190] (rows=156673775210 width=471) Conds:RS_215._col1, _col0=RS_225._col1, _col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9"] <-Reducer 3 [ONE_TO_ONE_EDGE] vectorized FORWARD [RS_215] PartitionCols:_col1, _col0 - Select Operator [SEL_214] (rows=114214965 width=239) + Select Operator [SEL_214] (rows=550076554 width=239) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_213] (rows=114214965 width=239) + Group By Operator [GBY_213] (rows=550076554 width=239) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1 <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_18] @@ -291,9 +291,9 @@ Stage-0 <-Reducer 9 [ONE_TO_ONE_EDGE] vectorized FORWARD [RS_225] PartitionCols:_col1, _col0 - Select Operator [SEL_224] (rows=40539971 width=239) + Select Operator [SEL_224] (rows=143966864 width=239) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_223] (rows=40539971 width=239) + Group By Operator [GBY_223] (rows=143966864 width=239) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1 <-Reducer 8 [SIMPLE_EDGE] SHUFFLE [RS_39] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query79.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query79.q.out index fb94428c03..04264e654a 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query79.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query79.q.out @@ -75,13 +75,13 @@ Stage-0 File Output Operator [FS_129] Limit [LIM_128] (rows=100 width=776) Number of rows:100 - Select Operator [SEL_127] (rows=43530621 width=776) + Select Operator [SEL_127] (rows=102592623 width=776) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_32] - Select Operator [SEL_31] (rows=43530621 width=776) + Select Operator [SEL_31] (rows=102592623 width=776) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] - Merge Join Operator [MERGEJOIN_99] (rows=43530621 width=685) + Merge Join Operator [MERGEJOIN_99] (rows=102592623 width=685) Conds:RS_101._col0=RS_126._col1(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col7","_col8"] <-Map 1 [SIMPLE_EDGE] vectorized SHUFFLE [RS_101] @@ -93,9 +93,9 @@ Stage-0 <-Reducer 8 [SIMPLE_EDGE] vectorized SHUFFLE [RS_126] PartitionCols:_col1 - Select Operator [SEL_125] (rows=43530621 width=507) + Select Operator [SEL_125] (rows=102592623 width=507) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Group By Operator [GBY_124] (rows=43530621 width=325) + Group By Operator [GBY_124] (rows=102592623 width=325) Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3 <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_25] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query80.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query80.q.out index dbaecf8fe8..ad1d2a2ba6 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query80.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query80.q.out @@ -258,13 +258,13 @@ Stage-0 File Output Operator [FS_452] Limit [LIM_451] (rows=100 width=619) Number of rows:100 - Select Operator [SEL_450] 
(rows=38846 width=619) + Select Operator [SEL_450] (rows=59581 width=619) Output:["_col0","_col1","_col2","_col3","_col4"] <-Reducer 9 [SIMPLE_EDGE] vectorized SHUFFLE [RS_449] - Select Operator [SEL_448] (rows=38846 width=619) + Select Operator [SEL_448] (rows=59581 width=619) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_447] (rows=38846 width=627) + Group By Operator [GBY_447] (rows=59581 width=627) Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 8 [SIMPLE_EDGE] <-Reducer 18 [CONTAINS] vectorized @@ -281,7 +281,7 @@ Stage-0 <-Reducer 17 [SIMPLE_EDGE] SHUFFLE [RS_71] PartitionCols:_col0 - Group By Operator [GBY_70] (rows=46000 width=436) + Group By Operator [GBY_70] (rows=427306 width=436) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0 Select Operator [SEL_68] (rows=8592843 width=305) Output:["_col0","_col1","_col2","_col3"] @@ -512,7 +512,7 @@ Stage-0 <-Reducer 6 [SIMPLE_EDGE] SHUFFLE [RS_34] PartitionCols:_col0 - Group By Operator [GBY_33] (rows=1704 width=436) + Group By Operator [GBY_33] (rows=4932 width=436) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0 Select Operator [SEL_31] (rows=15038783 width=100) Output:["_col0","_col1","_col2","_col3"] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query87.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query87.q.out index b6d3904c1c..5db2e0d719 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query87.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query87.q.out @@ -86,20 +86,20 @@ Stage-0 PARTITION_ONLY_SHUFFLE [RS_266] Group By Operator [GBY_265] (rows=1 width=8) Output:["_col0"],aggregations:["count()"] - Select Operator [SEL_264] (rows=8062883 width=16) - Filter Operator [FIL_263] (rows=8062883 width=16) + Select Operator [SEL_264] (rows=27555148 width=16) + Filter Operator [FIL_263] (rows=27555148 width=16) predicate:(((_col3 * 2) = _col4) and (_col3 > 0L)) - Select Operator [SEL_262] (rows=48377300 width=16) + Select Operator [SEL_262] (rows=165330890 width=16) Output:["_col3","_col4"] - Group By Operator [GBY_261] (rows=48377300 width=290) + Group By Operator [GBY_261] (rows=165330890 width=290) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 7 [SIMPLE_EDGE] <-Reducer 18 [CONTAINS] vectorized Reduce Output Operator [RS_292] PartitionCols:_col0, _col1, _col2 - Group By Operator [GBY_291] (rows=48377300 width=290) + Group By Operator [GBY_291] (rows=165330890 width=290) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col3)","sum(_col4)"],keys:_col0, _col1, _col2 - Select Operator [SEL_290] (rows=48377300 width=290) + Select Operator [SEL_290] (rows=165330890 width=290) Output:["_col0","_col1","_col2","_col3","_col4"] Select Operator [SEL_289] (rows=24986582 width=290) Output:["_col0","_col1","_col2","_col3","_col4"] @@ -160,13 +160,13 @@ Stage-0 <-Reducer 6 [CONTAINS] vectorized Reduce Output Operator [RS_260] PartitionCols:_col0, _col1, _col2 - Group By Operator [GBY_259] (rows=48377300 width=290) + Group By Operator [GBY_259] (rows=165330890 width=290) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col3)","sum(_col4)"],keys:_col0, _col1, _col2 - Select 
Operator [SEL_258] (rows=48377300 width=290) + Select Operator [SEL_258] (rows=165330890 width=290) Output:["_col0","_col1","_col2","_col3","_col4"] - Select Operator [SEL_257] (rows=23390718 width=290) + Select Operator [SEL_257] (rows=140344308 width=290) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_256] (rows=23390718 width=282) + Group By Operator [GBY_256] (rows=140344308 width=282) Output:["_col0","_col1","_col2","_col3"],aggregations:["count()"],keys:_col0, _col1, _col2 Select Operator [SEL_255] (rows=23390718 width=290) Output:["_col0","_col1","_col2"] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query95.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query95.q.out index fd709f99dd..4dc9cd8cc0 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query95.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query95.q.out @@ -113,12 +113,12 @@ Stage-0 PARTITION_ONLY_SHUFFLE [RS_267] Group By Operator [GBY_266] (rows=1 width=232) Output:["_col0","_col1","_col2"],aggregations:["count(_col0)","sum(_col1)","sum(_col2)"] - Group By Operator [GBY_265] (rows=143895019 width=228) + Group By Operator [GBY_265] (rows=38111880083 width=228) Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0 <-Reducer 6 [SIMPLE_EDGE] SHUFFLE [RS_101] PartitionCols:_col0 - Group By Operator [GBY_100] (rows=143895019 width=228) + Group By Operator [GBY_100] (rows=38111880083 width=228) Output:["_col0","_col2","_col3"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col3 Merge Join Operator [MERGEJOIN_227] (rows=83469759007 width=227) Conds:RS_47._col3=RS_48._col0(Inner),Output:["_col3","_col4","_col5"] diff --git a/ql/src/test/results/clientpositive/perf/tez/constraints/query97.q.out b/ql/src/test/results/clientpositive/perf/tez/constraints/query97.q.out index 1e15c0decd..d81f62bec7 100644 --- a/ql/src/test/results/clientpositive/perf/tez/constraints/query97.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query97.q.out @@ -82,14 +82,14 @@ Stage-0 PARTITION_ONLY_SHUFFLE [RS_34] Group By Operator [GBY_33] (rows=1 width=24) Output:["_col0","_col1","_col2"],aggregations:["sum(_col0)","sum(_col1)","sum(_col2)"] - Select Operator [SEL_31] (rows=19216436912 width=7) + Select Operator [SEL_31] (rows=641522732397 width=7) Output:["_col0","_col1","_col2"] - Merge Join Operator [MERGEJOIN_69] (rows=19216436912 width=7) + Merge Join Operator [MERGEJOIN_69] (rows=641522732397 width=7) Conds:RS_86._col0, _col1=RS_93._col0, _col1(Outer),Output:["_col0","_col2"] <-Reducer 3 [ONE_TO_ONE_EDGE] vectorized FORWARD [RS_86] PartitionCols:_col0, _col1 - Group By Operator [GBY_85] (rows=95493908 width=6) + Group By Operator [GBY_85] (rows=550076554 width=6) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_11] @@ -130,7 +130,7 @@ Stage-0 <-Reducer 9 [ONE_TO_ONE_EDGE] vectorized FORWARD [RS_93] PartitionCols:_col0, _col1 - Group By Operator [GBY_92] (rows=49393705 width=7) + Group By Operator [GBY_92] (rows=286549727 width=7) Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 <-Reducer 8 [SIMPLE_EDGE] SHUFFLE [RS_25] diff --git a/ql/src/test/results/clientpositive/perf/tez/query11.q.out b/ql/src/test/results/clientpositive/perf/tez/query11.q.out index da1c349ff0..3518fe7893 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query11.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query11.q.out @@ -192,15 +192,15 @@ Stage-0 
File Output Operator [FS_354]
Limit [LIM_353] (rows=100 width=85)
Number of rows:100
- Select Operator [SEL_352] (rows=12248093 width=85)
+ Select Operator [SEL_352] (rows=29679601 width=85)
Output:["_col0"]
<-Reducer 7 [SIMPLE_EDGE]
SHUFFLE [RS_93]
- Select Operator [SEL_92] (rows=12248093 width=85)
+ Select Operator [SEL_92] (rows=29679601 width=85)
Output:["_col0"]
- Filter Operator [FIL_91] (rows=12248093 width=537)
+ Filter Operator [FIL_91] (rows=29679601 width=537)
predicate:CASE WHEN (_col3 is not null) THEN (CASE WHEN (_col6) THEN (((_col1 / _col5) > (_col9 / _col3))) ELSE ((null > (_col9 / _col3))) END) ELSE (CASE WHEN (_col6) THEN (((_col1 / _col5) > null)) ELSE (null) END) END
- Merge Join Operator [MERGEJOIN_287] (rows=24496186 width=537)
+ Merge Join Operator [MERGEJOIN_287] (rows=59359203 width=537)
Conds:RS_88._col2=RS_351._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col8","_col9"]
<-Reducer 20 [SIMPLE_EDGE] vectorized
SHUFFLE [RS_351]
@@ -262,18 +262,18 @@ Stage-0
<-Reducer 6 [ONE_TO_ONE_EDGE]
FORWARD [RS_88]
PartitionCols:_col2
- Merge Join Operator [MERGEJOIN_286] (rows=20485011 width=440)
+ Merge Join Operator [MERGEJOIN_286] (rows=49639315 width=440)
Conds:RS_85._col2=RS_343._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6"]
<-Reducer 16 [SIMPLE_EDGE] vectorized
SHUFFLE [RS_343]
PartitionCols:_col0
- Select Operator [SEL_342] (rows=17130654 width=216)
+ Select Operator [SEL_342] (rows=26666666 width=216)
Output:["_col0","_col1","_col2"]
- Filter Operator [FIL_341] (rows=17130654 width=212)
+ Filter Operator [FIL_341] (rows=26666666 width=212)
predicate:(_col7 > 0)
- Select Operator [SEL_340] (rows=51391963 width=212)
+ Select Operator [SEL_340] (rows=80000000 width=212)
Output:["_col0","_col7"]
- Group By Operator [GBY_339] (rows=51391963 width=764)
+ Group By Operator [GBY_339] (rows=80000000 width=764)
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6
<-Reducer 15 [SIMPLE_EDGE]
SHUFFLE [RS_58]
@@ -322,7 +322,7 @@ Stage-0
<-Reducer 5 [ONE_TO_ONE_EDGE]
FORWARD [RS_85]
PartitionCols:_col2
- Merge Join Operator [MERGEJOIN_285] (rows=31888273 width=324)
+ Merge Join Operator [MERGEJOIN_285] (rows=49639315 width=324)
Conds:RS_323._col0=RS_333._col0(Inner),Output:["_col1","_col2","_col3"]
<-Reducer 12 [SIMPLE_EDGE] vectorized
SHUFFLE [RS_333]
@@ -382,9 +382,9 @@ Stage-0
<-Reducer 4 [SIMPLE_EDGE] vectorized
SHUFFLE [RS_323]
PartitionCols:_col0
- Select Operator [SEL_322] (rows=51391963 width=212)
+ Select Operator [SEL_322] (rows=80000000 width=212)
Output:["_col0","_col1"]
- Group By Operator [GBY_321] (rows=51391963 width=764)
+ Group By Operator [GBY_321] (rows=80000000 width=764)
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3,
width=115) + Select Operator [SEL_366] (rows=60 width=115) Output:["_col0","_col1"] <-Reducer 6 [SIMPLE_EDGE] vectorized SHUFFLE [RS_365] - Group By Operator [GBY_364] (rows=59 width=115) + Group By Operator [GBY_364] (rows=60 width=115) Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0 <-Union 5 [SIMPLE_EDGE] <-Reducer 11 [CONTAINS] vectorized diff --git a/ql/src/test/results/clientpositive/perf/tez/query34.q.out b/ql/src/test/results/clientpositive/perf/tez/query34.q.out index fd60a3a6bd..194c552d51 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query34.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query34.q.out @@ -112,9 +112,9 @@ Stage-0 PartitionCols:_col1 Filter Operator [FIL_133] (rows=6 width=12) predicate:_col2 BETWEEN 15 AND 20 - Select Operator [SEL_132] (rows=5521356 width=12) + Select Operator [SEL_132] (rows=479121995 width=12) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_131] (rows=5521356 width=12) + Group By Operator [GBY_131] (rows=479121995 width=12) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_26] diff --git a/ql/src/test/results/clientpositive/perf/tez/query4.q.out b/ql/src/test/results/clientpositive/perf/tez/query4.q.out index bb0d7ba3f5..126b9af612 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query4.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query4.q.out @@ -274,15 +274,15 @@ Stage-0 File Output Operator [FS_557] Limit [LIM_556] (rows=100 width=85) Number of rows:100 - Select Operator [SEL_555] (rows=7323197 width=85) + Select Operator [SEL_555] (rows=17745586 width=85) Output:["_col0"] <-Reducer 9 [SIMPLE_EDGE] SHUFFLE [RS_141] - Select Operator [SEL_140] (rows=7323197 width=85) + Select Operator [SEL_140] (rows=17745586 width=85) Output:["_col0"] - Filter Operator [FIL_139] (rows=7323197 width=537) + Filter Operator [FIL_139] (rows=17745586 width=537) predicate:CASE WHEN (_col3 is not null) THEN (CASE WHEN (_col9) THEN (((_col11 / _col8) > (_col14 / _col3))) ELSE ((null > (_col14 / _col3))) END) ELSE (CASE WHEN (_col9) THEN (((_col11 / _col8) > null)) ELSE (null) END) END - Merge Join Operator [MERGEJOIN_472] (rows=14646395 width=537) + Merge Join Operator [MERGEJOIN_472] (rows=35491172 width=537) Conds:RS_136._col2=RS_554._col0(Inner),Output:["_col3","_col8","_col9","_col11","_col13","_col14"] <-Reducer 30 [SIMPLE_EDGE] vectorized SHUFFLE [RS_554] @@ -344,9 +344,9 @@ Stage-0 <-Reducer 8 [ONE_TO_ONE_EDGE] FORWARD [RS_136] PartitionCols:_col2 - Filter Operator [FIL_135] (rows=12248093 width=668) + Filter Operator [FIL_135] (rows=29679601 width=668) predicate:CASE WHEN (_col6) THEN (CASE WHEN (_col9) THEN (((_col11 / _col8) > (_col1 / _col5))) ELSE ((null > (_col1 / _col5))) END) ELSE (CASE WHEN (_col9) THEN (((_col11 / _col8) > null)) ELSE (null) END) END - Merge Join Operator [MERGEJOIN_471] (rows=24496186 width=668) + Merge Join Operator [MERGEJOIN_471] (rows=59359203 width=668) Conds:RS_132._col2=RS_548._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col8","_col9","_col11"] <-Reducer 26 [SIMPLE_EDGE] vectorized SHUFFLE [RS_548] @@ -398,7 +398,7 @@ Stage-0 <-Reducer 7 [ONE_TO_ONE_EDGE] FORWARD [RS_132] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_470] (rows=20485011 width=556) + Merge Join Operator [MERGEJOIN_470] (rows=49639315 width=556) Conds:RS_129._col2=RS_542._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col8","_col9"] <-Reducer 22 [SIMPLE_EDGE] 
vectorized SHUFFLE [RS_542] @@ -458,18 +458,18 @@ Stage-0 <-Reducer 6 [ONE_TO_ONE_EDGE] FORWARD [RS_129] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_469] (rows=20485011 width=440) + Merge Join Operator [MERGEJOIN_469] (rows=49639315 width=440) Conds:RS_126._col2=RS_534._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6"] <-Reducer 18 [SIMPLE_EDGE] vectorized SHUFFLE [RS_534] PartitionCols:_col0 - Select Operator [SEL_533] (rows=17130654 width=216) + Select Operator [SEL_533] (rows=26666666 width=216) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_532] (rows=17130654 width=212) + Filter Operator [FIL_532] (rows=26666666 width=212) predicate:(_col7 > 0) - Select Operator [SEL_531] (rows=51391963 width=212) + Select Operator [SEL_531] (rows=80000000 width=212) Output:["_col0","_col7"] - Group By Operator [GBY_530] (rows=51391963 width=764) + Group By Operator [GBY_530] (rows=80000000 width=764) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6 <-Reducer 17 [SIMPLE_EDGE] SHUFFLE [RS_58] @@ -518,7 +518,7 @@ Stage-0 <-Reducer 5 [ONE_TO_ONE_EDGE] FORWARD [RS_126] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_468] (rows=31888273 width=324) + Merge Join Operator [MERGEJOIN_468] (rows=49639315 width=324) Conds:RS_518._col0=RS_526._col0(Inner),Output:["_col1","_col2","_col3"] <-Reducer 14 [SIMPLE_EDGE] vectorized SHUFFLE [RS_526] @@ -578,9 +578,9 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] vectorized SHUFFLE [RS_518] PartitionCols:_col0 - Select Operator [SEL_517] (rows=51391963 width=212) + Select Operator [SEL_517] (rows=80000000 width=212) Output:["_col0","_col1"] - Group By Operator [GBY_516] (rows=51391963 width=764) + Group By Operator [GBY_516] (rows=80000000 width=764) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6 <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_17] diff --git a/ql/src/test/results/clientpositive/perf/tez/query5.q.out b/ql/src/test/results/clientpositive/perf/tez/query5.q.out index 2ce689b1bb..35acfc82b3 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query5.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query5.q.out @@ -306,13 +306,13 @@ Stage-0 File Output Operator [FS_304] Limit [LIM_303] (rows=100 width=619) Number of rows:100 - Select Operator [SEL_302] (rows=38846 width=619) + Select Operator [SEL_302] (rows=59581 width=619) Output:["_col0","_col1","_col2","_col3","_col4"] <-Reducer 7 [SIMPLE_EDGE] vectorized SHUFFLE [RS_301] - Select Operator [SEL_300] (rows=38846 width=619) + Select Operator [SEL_300] (rows=59581 width=619) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_299] (rows=38846 width=627) + Group By Operator [GBY_299] (rows=59581 width=627) Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 6 [SIMPLE_EDGE] <-Reducer 14 [CONTAINS] vectorized @@ -329,7 +329,7 @@ Stage-0 <-Reducer 13 [SIMPLE_EDGE] SHUFFLE [RS_47] PartitionCols:_col0 - Group By Operator [GBY_46] (rows=46000 width=548) + Group By Operator [GBY_46] (rows=2835758 width=548) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col8 Merge Join Operator [MERGEJOIN_222] (rows=34813117 
width=535) Conds:RS_42._col0=RS_310._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col8"] @@ -400,7 +400,7 @@ Stage-0 <-Reducer 17 [SIMPLE_EDGE] SHUFFLE [RS_80] PartitionCols:_col0 - Group By Operator [GBY_79] (rows=84 width=548) + Group By Operator [GBY_79] (rows=3498 width=548) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col8 Merge Join Operator [MERGEJOIN_224] (rows=30966059 width=543) Conds:RS_75._col0=RS_318._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col8"] @@ -482,7 +482,7 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] SHUFFLE [RS_22] PartitionCols:_col0 - Group By Operator [GBY_21] (rows=1704 width=548) + Group By Operator [GBY_21] (rows=78090 width=548) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col4)","sum(_col3)","sum(_col5)"],keys:_col8 Merge Join Operator [MERGEJOIN_220] (rows=64325014 width=376) Conds:RS_17._col0=RS_293._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col8"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query70.q.out b/ql/src/test/results/clientpositive/perf/tez/query70.q.out index 4650ef66fc..6d2528e474 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query70.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query70.q.out @@ -103,22 +103,22 @@ Stage-0 File Output Operator [FS_168] Limit [LIM_167] (rows=100 width=492) Number of rows:100 - Select Operator [SEL_166] (rows=240 width=492) + Select Operator [SEL_166] (rows=720 width=492) Output:["_col0","_col1","_col2","_col3","_col4"] <-Reducer 6 [SIMPLE_EDGE] vectorized SHUFFLE [RS_165] - Select Operator [SEL_164] (rows=240 width=492) + Select Operator [SEL_164] (rows=720 width=492) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - PTF Operator [PTF_163] (rows=240 width=304) + PTF Operator [PTF_163] (rows=720 width=304) Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col2 DESC NULLS LAST","partition by:":"(grouping(_col3, 1) + grouping(_col3, 0)), CASE WHEN ((grouping(_col3, 0) = 0)) THEN (_col0) ELSE (CAST( null AS STRING)) END"}] - Select Operator [SEL_162] (rows=240 width=304) + Select Operator [SEL_162] (rows=720 width=304) Output:["_col0","_col1","_col2","_col3"] <-Reducer 5 [SIMPLE_EDGE] vectorized SHUFFLE [RS_161] PartitionCols:(grouping(_col3, 1) + grouping(_col3, 0)), CASE WHEN ((grouping(_col3, 0) = 0)) THEN (_col0) ELSE (CAST( null AS STRING)) END - Select Operator [SEL_160] (rows=240 width=304) + Select Operator [SEL_160] (rows=720 width=304) Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_159] (rows=240 width=304) + Group By Operator [GBY_159] (rows=720 width=304) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 4 [SIMPLE_EDGE] SHUFFLE [RS_49] @@ -148,7 +148,7 @@ Stage-0 <-Reducer 8 [SIMPLE_EDGE] SHUFFLE [RS_26] PartitionCols:_col0 - Group By Operator [GBY_25] (rows=1704 width=198) + Group By Operator [GBY_25] (rows=2989 width=198) Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col5 Merge Join Operator [MERGEJOIN_133] (rows=91197860 width=168) Conds:RS_21._col1=RS_151._col0(Inner),Output:["_col2","_col5"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query73.q.out b/ql/src/test/results/clientpositive/perf/tez/query73.q.out index a3ec37822f..b526f47ec5 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query73.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query73.q.out @@ -106,9 +106,9 
@@ Stage-0 PartitionCols:_col1 Filter Operator [FIL_133] (rows=5 width=12) predicate:_col2 BETWEEN 1 AND 5 - Select Operator [SEL_132] (rows=788766 width=12) + Select Operator [SEL_132] (rows=215975848 width=12) Output:["_col0","_col1","_col2"] - Group By Operator [GBY_131] (rows=788766 width=12) + Group By Operator [GBY_131] (rows=215975848 width=12) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_26] diff --git a/ql/src/test/results/clientpositive/perf/tez/query74.q.out b/ql/src/test/results/clientpositive/perf/tez/query74.q.out index 20aade2974..6d002c68f0 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query74.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query74.q.out @@ -164,15 +164,15 @@ Stage-0 File Output Operator [FS_353] Limit [LIM_352] (rows=100 width=280) Number of rows:100 - Select Operator [SEL_351] (rows=12248093 width=280) + Select Operator [SEL_351] (rows=29679601 width=280) Output:["_col0","_col1","_col2"] <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_93] - Select Operator [SEL_92] (rows=12248093 width=280) + Select Operator [SEL_92] (rows=29679601 width=280) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_91] (rows=12248093 width=732) + Filter Operator [FIL_91] (rows=29679601 width=732) predicate:CASE WHEN (_col3 is not null) THEN (CASE WHEN (_col6) THEN (((_col1 / _col5) > (_col10 / _col3))) ELSE ((null > (_col10 / _col3))) END) ELSE (CASE WHEN (_col6) THEN (((_col1 / _col5) > null)) ELSE (null) END) END - Merge Join Operator [MERGEJOIN_287] (rows=24496186 width=732) + Merge Join Operator [MERGEJOIN_287] (rows=59359203 width=732) Conds:RS_88._col2=RS_350._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col7","_col8","_col9","_col10"] <-Reducer 20 [SIMPLE_EDGE] vectorized SHUFFLE [RS_350] @@ -232,18 +232,18 @@ Stage-0 <-Reducer 6 [ONE_TO_ONE_EDGE] FORWARD [RS_88] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_286] (rows=20485011 width=440) + Merge Join Operator [MERGEJOIN_286] (rows=49639315 width=440) Conds:RS_85._col2=RS_343._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6"] <-Reducer 16 [SIMPLE_EDGE] vectorized SHUFFLE [RS_343] PartitionCols:_col0 - Select Operator [SEL_342] (rows=17130654 width=216) + Select Operator [SEL_342] (rows=26666666 width=216) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_341] (rows=17130654 width=212) + Filter Operator [FIL_341] (rows=26666666 width=212) predicate:(_col3 > 0) - Select Operator [SEL_340] (rows=51391963 width=212) + Select Operator [SEL_340] (rows=80000000 width=212) Output:["_col0","_col3"] - Group By Operator [GBY_339] (rows=51391963 width=392) + Group By Operator [GBY_339] (rows=80000000 width=392) Output:["_col0","_col1","_col2","_col3"],aggregations:["max(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 15 [SIMPLE_EDGE] SHUFFLE [RS_58] @@ -292,7 +292,7 @@ Stage-0 <-Reducer 5 [ONE_TO_ONE_EDGE] FORWARD [RS_85] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_285] (rows=31888273 width=324) + Merge Join Operator [MERGEJOIN_285] (rows=49639315 width=324) Conds:RS_323._col0=RS_333._col0(Inner),Output:["_col1","_col2","_col3"] <-Reducer 12 [SIMPLE_EDGE] vectorized SHUFFLE [RS_333] @@ -352,9 +352,9 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] vectorized SHUFFLE [RS_323] PartitionCols:_col0 - Select Operator [SEL_322] (rows=51391963 width=212) + Select Operator [SEL_322] (rows=80000000 width=212) Output:["_col0","_col1"] - Group By Operator [GBY_321] (rows=51391963 width=392) + Group 
By Operator [GBY_321] (rows=80000000 width=392) Output:["_col0","_col1","_col2","_col3"],aggregations:["max(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_17] diff --git a/ql/src/test/results/clientpositive/perf/tez/query78.q.out b/ql/src/test/results/clientpositive/perf/tez/query78.q.out index 3d938e1c8e..f25cce0ca9 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query78.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query78.q.out @@ -158,22 +158,22 @@ Stage-0 File Output Operator [FS_238] Limit [LIM_237] (rows=100 width=484) Number of rows:100 - Select Operator [SEL_236] (rows=232433056973 width=483) + Select Operator [SEL_236] (rows=11141570439055 width=483) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"] <-Reducer 5 [SIMPLE_EDGE] SHUFFLE [RS_73] - Select Operator [SEL_72] (rows=232433056973 width=719) + Select Operator [SEL_72] (rows=11141570439055 width=719) Output:["_col0","_col1","_col6","_col7","_col8","_col9","_col10","_col11","_col12"] - Filter Operator [FIL_71] (rows=232433056973 width=701) + Filter Operator [FIL_71] (rows=11141570439055 width=703) predicate:CASE WHEN (_col11 is not null) THEN ((_col11 > 0L)) ELSE (false) END - Merge Join Operator [MERGEJOIN_191] (rows=464866113946 width=701) + Merge Join Operator [MERGEJOIN_191] (rows=22283140878110 width=703) Conds:RS_68._col1=RS_235._col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9","_col11","_col12","_col13"] <-Reducer 12 [SIMPLE_EDGE] vectorized SHUFFLE [RS_235] PartitionCols:_col0 - Select Operator [SEL_234] (rows=50796051 width=235) + Select Operator [SEL_234] (rows=143274863 width=235) Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_233] (rows=50796051 width=239) + Group By Operator [GBY_233] (rows=143274863 width=239) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1 <-Reducer 11 [SIMPLE_EDGE] SHUFFLE [RS_65] @@ -230,16 +230,16 @@ Stage-0 <-Reducer 4 [SIMPLE_EDGE] SHUFFLE [RS_68] PartitionCols:_col1 - Filter Operator [FIL_45] (rows=4580227799 width=471) + Filter Operator [FIL_45] (rows=78336887605 width=471) predicate:CASE WHEN (_col7 is not null) THEN ((_col7 > 0L)) ELSE (false) END - Merge Join Operator [MERGEJOIN_190] (rows=9160455599 width=471) + Merge Join Operator [MERGEJOIN_190] (rows=156673775210 width=471) Conds:RS_215._col1, _col0=RS_225._col1, _col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9"] <-Reducer 3 [ONE_TO_ONE_EDGE] vectorized FORWARD [RS_215] PartitionCols:_col1, _col0 - Select Operator [SEL_214] (rows=114214965 width=239) + Select Operator [SEL_214] (rows=550076554 width=239) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_213] (rows=114214965 width=239) + Group By Operator [GBY_213] (rows=550076554 width=239) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1 <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_18] @@ -291,9 +291,9 @@ Stage-0 <-Reducer 9 [ONE_TO_ONE_EDGE] vectorized FORWARD [RS_225] PartitionCols:_col1, _col0 - Select Operator [SEL_224] (rows=40539971 width=239) + Select Operator [SEL_224] (rows=143966864 width=239) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_223] (rows=40539971 width=239) + Group By Operator [GBY_223] (rows=143966864 width=239) 
Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1 <-Reducer 8 [SIMPLE_EDGE] SHUFFLE [RS_39] diff --git a/ql/src/test/results/clientpositive/perf/tez/query79.q.out b/ql/src/test/results/clientpositive/perf/tez/query79.q.out index 877ff7bc77..e411a56fb0 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query79.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query79.q.out @@ -75,13 +75,13 @@ Stage-0 File Output Operator [FS_131] Limit [LIM_130] (rows=100 width=776) Number of rows:100 - Select Operator [SEL_129] (rows=43530621 width=776) + Select Operator [SEL_129] (rows=102592623 width=776) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_33] - Select Operator [SEL_32] (rows=43530621 width=776) + Select Operator [SEL_32] (rows=102592623 width=776) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] - Merge Join Operator [MERGEJOIN_100] (rows=43530621 width=685) + Merge Join Operator [MERGEJOIN_100] (rows=102592623 width=685) Conds:RS_103._col0=RS_128._col1(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col7","_col8"] <-Map 1 [SIMPLE_EDGE] vectorized SHUFFLE [RS_103] @@ -95,9 +95,9 @@ Stage-0 <-Reducer 8 [SIMPLE_EDGE] vectorized SHUFFLE [RS_128] PartitionCols:_col1 - Select Operator [SEL_127] (rows=43530621 width=507) + Select Operator [SEL_127] (rows=102592623 width=507) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Group By Operator [GBY_126] (rows=43530621 width=325) + Group By Operator [GBY_126] (rows=102592623 width=325) Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3 <-Reducer 7 [SIMPLE_EDGE] SHUFFLE [RS_26] diff --git a/ql/src/test/results/clientpositive/perf/tez/query80.q.out b/ql/src/test/results/clientpositive/perf/tez/query80.q.out index 223e61aa24..b58acc015f 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query80.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query80.q.out @@ -258,13 +258,13 @@ Stage-0 File Output Operator [FS_460] Limit [LIM_459] (rows=100 width=619) Number of rows:100 - Select Operator [SEL_458] (rows=38846 width=619) + Select Operator [SEL_458] (rows=59581 width=619) Output:["_col0","_col1","_col2","_col3","_col4"] <-Reducer 9 [SIMPLE_EDGE] vectorized SHUFFLE [RS_457] - Select Operator [SEL_456] (rows=38846 width=619) + Select Operator [SEL_456] (rows=59581 width=619) Output:["_col0","_col1","_col2","_col3","_col4"] - Group By Operator [GBY_455] (rows=38846 width=627) + Group By Operator [GBY_455] (rows=59581 width=627) Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 8 [SIMPLE_EDGE] <-Reducer 18 [CONTAINS] vectorized @@ -281,7 +281,7 @@ Stage-0 <-Reducer 17 [SIMPLE_EDGE] SHUFFLE [RS_75] PartitionCols:_col0 - Group By Operator [GBY_74] (rows=46000 width=436) + Group By Operator [GBY_74] (rows=427306 width=436) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0 Select Operator [SEL_72] (rows=8592843 width=305) Output:["_col0","_col1","_col2","_col3"] @@ -520,7 +520,7 @@ Stage-0 <-Reducer 6 [SIMPLE_EDGE] SHUFFLE [RS_36] PartitionCols:_col0 - Group By Operator [GBY_35] (rows=1704 width=436) + Group By Operator [GBY_35] (rows=4932 width=436) 
Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
Select Operator [SEL_33] (rows=15038783 width=100)
Output:["_col0","_col1","_col2","_col3"]
diff --git a/ql/src/test/results/clientpositive/perf/tez/query87.q.out b/ql/src/test/results/clientpositive/perf/tez/query87.q.out
index 5b59116741..8caa03d420 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query87.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query87.q.out
@@ -86,20 +86,20 @@ Stage-0
PARTITION_ONLY_SHUFFLE [RS_270]
Group By Operator [GBY_269] (rows=1 width=8)
Output:["_col0"],aggregations:["count()"]
- Select Operator [SEL_268] (rows=8062883 width=16)
- Filter Operator [FIL_267] (rows=8062883 width=16)
+ Select Operator [SEL_268] (rows=27555148 width=16)
+ Filter Operator [FIL_267] (rows=27555148 width=16)
predicate:(((_col3 * 2) = _col4) and (_col3 > 0L))
- Select Operator [SEL_266] (rows=48377300 width=16)
+ Select Operator [SEL_266] (rows=165330890 width=16)
Output:["_col3","_col4"]
- Group By Operator [GBY_265] (rows=48377300 width=290)
+ Group By Operator [GBY_265] (rows=165330890 width=290)
Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2
<-Union 7 [SIMPLE_EDGE]
<-Reducer 18 [CONTAINS] vectorized
Reduce Output Operator [RS_296]
PartitionCols:_col0, _col1, _col2
- Group By Operator [GBY_295] (rows=48377300 width=290)
+ Group By Operator [GBY_295] (rows=165330890 width=290)
Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col3)","sum(_col4)"],keys:_col0, _col1, _col2
- Select Operator [SEL_294] (rows=48377300 width=290)
+ Select Operator [SEL_294] (rows=165330890 width=290)
Output:["_col0","_col1","_col2","_col3","_col4"]
Select Operator [SEL_293] (rows=24986582 width=290)
Output:["_col0","_col1","_col2","_col3","_col4"]
@@ -162,13 +162,13 @@ Stage-0
<-Reducer 6 [CONTAINS] vectorized
Reduce Output Operator [RS_264]
PartitionCols:_col0, _col1, _col2
- Group By Operator [GBY_263] (rows=48377300 width=290)
+ Group By Operator [GBY_263] (rows=165330890 width=290)
Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col3)","sum(_col4)"],keys:_col0, _col1, _col2
- Select Operator [SEL_262] (rows=48377300 width=290)
+ Select Operator [SEL_262] (rows=165330890 width=290)
Output:["_col0","_col1","_col2","_col3","_col4"]
- Select Operator [SEL_261] (rows=23390718 width=290)
+ Select Operator [SEL_261] (rows=140344308 width=290)
Output:["_col0","_col1","_col2","_col3","_col4"]
- Group By Operator [GBY_260] (rows=23390718 width=282)
+ Group By Operator [GBY_260] (rows=140344308 width=282)
Output:["_col0","_col1","_col2","_col3"],aggregations:["count()"],keys:_col0, _col1, _col2
Select Operator [SEL_259] (rows=23390718 width=290)
Output:["_col0","_col1","_col2"]
diff --git a/ql/src/test/results/clientpositive/perf/tez/query97.q.out b/ql/src/test/results/clientpositive/perf/tez/query97.q.out
index 8cd15fb1a2..7e427c1319 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query97.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query97.q.out
@@ -82,14 +82,14 @@ Stage-0
PARTITION_ONLY_SHUFFLE [RS_34]
Group By Operator [GBY_33] (rows=1 width=24)
Output:["_col0","_col1","_col2"],aggregations:["sum(_col0)","sum(_col1)","sum(_col2)"]
- Select Operator [SEL_31] (rows=19216436912 width=7)
+ Select Operator [SEL_31] (rows=641522732397 width=7)
Output:["_col0","_col1","_col2"]
- Merge Join Operator [MERGEJOIN_69] (rows=19216436912 width=7)
+
   Merge Join Operator [MERGEJOIN_69] (rows=641522732397 width=7)
     Conds:RS_86._col0, _col1=RS_93._col0, _col1(Outer),Output:["_col0","_col2"]
   <-Reducer 3 [ONE_TO_ONE_EDGE] vectorized
   FORWARD [RS_86]
     PartitionCols:_col0, _col1
-  Group By Operator [GBY_85] (rows=95493908 width=6)
+  Group By Operator [GBY_85] (rows=550076554 width=6)
     Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
   <-Reducer 2 [SIMPLE_EDGE]
   SHUFFLE [RS_11]
@@ -130,7 +130,7 @@ Stage-0
   <-Reducer 9 [ONE_TO_ONE_EDGE] vectorized
   FORWARD [RS_93]
     PartitionCols:_col0, _col1
-  Group By Operator [GBY_92] (rows=49393705 width=7)
+  Group By Operator [GBY_92] (rows=286549727 width=7)
     Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
   <-Reducer 8 [SIMPLE_EDGE]
   SHUFFLE [RS_25]
diff --git a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
index 1f681944cd..b0d008cfe9 100644
--- a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
@@ -321,9 +321,9 @@ Stage-0
   Stage-1
   Reducer 2
   File Output Operator [FS_6]
-  Select Operator [SEL_5] (rows=10 width=97)
+  Select Operator [SEL_5] (rows=20 width=92)
     Output:["_col0","_col1","_col2"]
-  Group By Operator [GBY_4] (rows=10 width=101)
+  Group By Operator [GBY_4] (rows=20 width=96)
     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
   <-Map 1 [GROUP]
   GROUP [RS_3]
@@ -357,18 +357,18 @@ Stage-0
   Stage-1
   Reducer 3
   File Output Operator [FS_11]
-  Select Operator [SEL_10] (rows=5 width=20)
+  Select Operator [SEL_10] (rows=10 width=20)
     Output:["_col0","_col1","_col2"]
-  Group By Operator [GBY_9] (rows=5 width=20)
+  Group By Operator [GBY_9] (rows=10 width=20)
     Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
   <-Reducer 2 [GROUP]
   GROUP [RS_8]
     PartitionCols:_col0, _col1
-  Group By Operator [GBY_7] (rows=5 width=20)
+  Group By Operator [GBY_7] (rows=10 width=20)
     Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col0, _col1
-  Select Operator [SEL_5] (rows=10 width=101)
+  Select Operator [SEL_5] (rows=20 width=96)
     Output:["_col0","_col1"]
-  Group By Operator [GBY_4] (rows=10 width=101)
+  Group By Operator [GBY_4] (rows=20 width=96)
     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
   <-Map 1 [GROUP]
   GROUP [RS_3]
@@ -428,7 +428,7 @@ Stage-0
     Output:["_col1","_col5"]
   Filter Operator [FIL_21] (rows=1 width=24)
     predicate:(((_col4 + _col7) >= 0) and ((_col4 > 0) or _col2))
-  Join Operator [JOIN_20] (rows=3 width=22)
+  Join Operator [JOIN_20] (rows=7 width=23)
     Output:["_col1","_col2","_col4","_col5","_col7"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
   <-Map 1 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_17]
@@ -458,9 +458,9 @@ Stage-0
   <-Reducer 8 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_19]
     PartitionCols:_col0
-  Select Operator [SEL_16] (rows=2 width=89)
+  Select Operator [SEL_16] (rows=5 width=89)
     Output:["_col0","_col1"]
-  Group By Operator [GBY_15] (rows=2 width=93)
+  Group By Operator [GBY_15] (rows=5 width=93)
     Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
   <-Map 7 [GROUP]
   GROUP [RS_14]
@@ -1370,13 +1370,13 @@ Stage-0
   File Output Operator [FS_9]
   Limit [LIM_8] (rows=1 width=97)
     Number of rows:1
-  Select Operator [SEL_7] (rows=10 width=97)
+  Select Operator [SEL_7] (rows=20 width=92)
     Output:["_col0","_col1","_col2"]
   <-Reducer 2 [SORT]
   SORT [RS_6]
-  Select Operator [SEL_5] (rows=10 width=97)
+  Select Operator [SEL_5] (rows=20 width=92)
     Output:["_col0","_col1","_col2"]
-  Group By Operator [GBY_4] (rows=10 width=101)
+  Group By Operator [GBY_4] (rows=20 width=96)
     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
   <-Map 1 [GROUP]
   GROUP [RS_3]
@@ -1413,20 +1413,20 @@ Stage-0
   File Output Operator [FS_14]
   Limit [LIM_13] (rows=1 width=20)
     Number of rows:1
-  Select Operator [SEL_12] (rows=5 width=20)
+  Select Operator [SEL_12] (rows=10 width=20)
     Output:["_col0","_col1","_col2"]
   <-Reducer 3 [SORT]
   SORT [RS_11]
-  Group By Operator [GBY_9] (rows=5 width=20)
+  Group By Operator [GBY_9] (rows=10 width=20)
     Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
   <-Reducer 2 [GROUP]
   GROUP [RS_8]
     PartitionCols:_col0, _col1
-  Group By Operator [GBY_7] (rows=5 width=20)
+  Group By Operator [GBY_7] (rows=10 width=20)
     Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col0
-  Select Operator [SEL_5] (rows=10 width=101)
+  Select Operator [SEL_5] (rows=20 width=96)
     Output:["_col0","_col1"]
-  Group By Operator [GBY_4] (rows=10 width=101)
+  Group By Operator [GBY_4] (rows=20 width=96)
     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
   <-Map 1 [GROUP]
   GROUP [RS_3]
@@ -1539,26 +1539,26 @@ Stage-0
   Stage-1
   Reducer 6
   File Output Operator [FS_44]
-  Limit [LIM_42] (rows=1 width=28)
+  Limit [LIM_42] (rows=4 width=27)
     Number of rows:5
-  Select Operator [SEL_41] (rows=1 width=28)
+  Select Operator [SEL_41] (rows=4 width=27)
     Output:["_col0","_col1","_col2"]
   <-Reducer 5 [SORT]
   SORT [RS_40]
-  Select Operator [SEL_39] (rows=1 width=28)
+  Select Operator [SEL_39] (rows=4 width=27)
     Output:["_col0","_col1","_col2","_col3"]
-  Group By Operator [GBY_38] (rows=1 width=20)
+  Group By Operator [GBY_38] (rows=4 width=19)
     Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
   <-Reducer 4 [GROUP]
   GROUP [RS_37]
     PartitionCols:_col0, _col1
-  Group By Operator [GBY_36] (rows=1 width=20)
+  Group By Operator [GBY_36] (rows=2 width=20)
     Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col4, _col7
-  Select Operator [SEL_35] (rows=2 width=28)
+  Select Operator [SEL_35] (rows=4 width=28)
     Output:["_col4","_col7"]
-  Filter Operator [FIL_33] (rows=2 width=28)
+  Filter Operator [FIL_33] (rows=4 width=28)
     predicate:(((_col3 + _col1) >= 0) and (_col5 or _col8))
-  Join Operator [JOIN_32] (rows=6 width=27)
+  Join Operator [JOIN_32] (rows=13 width=27)
     Output:["_col1","_col3","_col4","_col5","_col7","_col8"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
   <-Map 10 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_31]
@@ -1572,17 +1572,17 @@ Stage-0
   <-Reducer 3 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_29]
     PartitionCols:_col0
-  Filter Operator [FIL_11] (rows=2 width=105)
+  Filter Operator [FIL_11] (rows=3 width=105)
     predicate:_col0 is not null
-  Limit [LIM_9] (rows=3 width=105)
+  Limit [LIM_9] (rows=5 width=88)
     Number of rows:5
-  Select Operator [SEL_8] (rows=3 width=105)
+  Select Operator [SEL_8] (rows=6 width=90)
     Output:["_col0","_col1"]
   <-Reducer 2 [SORT]
   SORT [RS_7]
-  Select Operator [SEL_6] (rows=3 width=105)
+  Select Operator [SEL_6] (rows=6 width=90)
     Output:["_col0","_col1","_col2","_col3"]
-  Group By Operator [GBY_5] (rows=3 width=101)
+  Group By Operator [GBY_5] (rows=6 width=85)
     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
   <-Map 1 [GROUP]
   GROUP [RS_4]
@@ -1596,19 +1596,19 @@ Stage-0
   <-Reducer 9 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_30]
     PartitionCols:_col0
-  Select Operator [SEL_25] (rows=2 width=101)
+  Select Operator [SEL_25] (rows=3 width=101)
     Output:["_col0","_col1","_col2","_col3"]
-  Filter Operator [FIL_24] (rows=2 width=97)
+  Filter Operator [FIL_24] (rows=3 width=97)
     predicate:_col0 is not null
-  Limit [LIM_22] (rows=3 width=97)
+  Limit [LIM_22] (rows=5 width=80)
     Number of rows:5
-  Select Operator [SEL_21] (rows=3 width=97)
+  Select Operator [SEL_21] (rows=6 width=82)
     Output:["_col0","_col1","_col2"]
   <-Reducer 8 [SORT]
   SORT [RS_20]
-  Select Operator [SEL_19] (rows=3 width=97)
+  Select Operator [SEL_19] (rows=6 width=82)
     Output:["_col0","_col1","_col2"]
-  Group By Operator [GBY_18] (rows=3 width=101)
+  Group By Operator [GBY_18] (rows=6 width=85)
     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
   <-Map 7 [GROUP]
   GROUP [RS_17]
@@ -1987,9 +1987,9 @@ Stage-0
     Output:["_col0","_col1"]
   Group By Operator [GBY_12] (rows=83 width=91)
     Output:["_col0"],keys:_col1
-  Select Operator [SEL_8] (rows=83 width=178)
+  Select Operator [SEL_8] (rows=166 width=178)
     Output:["_col1"]
-  Group By Operator [GBY_7] (rows=83 width=178)
+  Group By Operator [GBY_7] (rows=166 width=178)
     Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
   <-Map 3 [GROUP PARTITION-LEVEL SORT]
   GROUP PARTITION-LEVEL SORT [RS_6]
@@ -2036,16 +2036,16 @@ Stage-0
   Stage-1
   Reducer 3
   File Output Operator [FS_18]
-  Select Operator [SEL_17] (rows=250 width=178)
+  Select Operator [SEL_17] (rows=500 width=178)
     Output:["_col0","_col1"]
-  Filter Operator [FIL_16] (rows=250 width=179)
+  Filter Operator [FIL_16] (rows=500 width=179)
     predicate:_col4 is null
-  Join Operator [JOIN_15] (rows=333 width=179)
+  Join Operator [JOIN_15] (rows=832 width=179)
     Output:["_col0","_col1","_col4"],condition map:[{"":"{\"type\":\"Left Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0, _col1","1":"_col0, _col1"}
   <-Reducer 2 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_13]
     PartitionCols:_col0, _col1
-  Group By Operator [GBY_4] (rows=250 width=178)
+  Group By Operator [GBY_4] (rows=500 width=178)
     Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
   <-Map 1 [GROUP]
   GROUP [RS_3]
@@ -2059,9 +2059,9 @@ Stage-0
   <-Reducer 5 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_14]
     PartitionCols:_col0, _col1
-  Select Operator [SEL_12] (rows=83 width=182)
+  Select Operator [SEL_12] (rows=166 width=182)
     Output:["_col0","_col1","_col2"]
-  Group By Operator [GBY_11] (rows=83 width=178)
+  Group By Operator [GBY_11] (rows=166 width=178)
     Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
   <-Map 4 [GROUP]
   GROUP [RS_10]
@@ -2348,9 +2348,9 @@ Stage-0
   <-Reducer 4 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_29]
     PartitionCols:_col2
-  Filter Operator [FIL_17] (rows=65 width=186)
+  Filter Operator [FIL_17] (rows=166 width=186)
     predicate:_col2 is not null
-  Group By Operator [GBY_16] (rows=65 width=186)
+  Group By Operator [GBY_16] (rows=166 width=186)
     Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
   <-Reducer 3 [GROUP]
   GROUP [RS_15]
@@ -2434,14 +2434,14 @@ Stage-0
   Stage-1
   Reducer 3
   File Output Operator [FS_21]
-  Join Operator [JOIN_19] (rows=13 width=227)
+  Join Operator [JOIN_19] (rows=26 width=227)
     Output:["_col0","_col1","_col2"],condition map:[{"":"{\"type\":\"Left Semi\",\"left\":0,\"right\":1}"}],keys:{"0":"_col1","1":"_col0"}
   <-Reducer 2 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_17]
     PartitionCols:_col1
-  Select Operator [SEL_6] (rows=13 width=227)
+  Select Operator [SEL_6] (rows=26 width=227)
     Output:["_col0","_col1","_col2"]
-  Group By Operator [GBY_5] (rows=13 width=235)
+  Group By Operator [GBY_5] (rows=26 width=235)
     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0, KEY._col1
   <-Map 1 [GROUP]
   GROUP [RS_4]
@@ -2590,11 +2590,11 @@ Stage-0
   Stage-1
   Reducer 3
   File Output Operator [FS_24]
-  Select Operator [SEL_23] (rows=20 width=223)
+  Select Operator [SEL_23] (rows=23 width=223)
     Output:["_col0","_col1","_col2"]
-  Filter Operator [FIL_22] (rows=20 width=230)
+  Filter Operator [FIL_22] (rows=23 width=231)
     predicate:CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END
-  Join Operator [JOIN_21] (rows=41 width=229)
+  Join Operator [JOIN_21] (rows=47 width=231)
     Output:["_col0","_col1","_col2","_col4","_col5","_col8"],condition map:[{"":"{\"type\":\"Left Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0, _col1","1":"_col0, _col1"}
   <-Reducer 2 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_19]
@@ -2627,9 +2627,9 @@ Stage-0
   <-Reducer 7 [PARTITION-LEVEL SORT]
   PARTITION-LEVEL SORT [RS_20]
     PartitionCols:_col0, _col1
-  Select Operator [SEL_15] (rows=4 width=223)
+  Select Operator [SEL_15] (rows=8 width=223)
     Output:["_col0","_col1","_col2"]
-  Group By Operator [GBY_14] (rows=4 width=219)
+  Group By Operator [GBY_14] (rows=8 width=219)
     Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
   <-Map 6 [GROUP]
   GROUP [RS_13]