diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 857b83e..9d54f74 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -29,6 +29,7 @@
 import java.util.BitSet;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.Deque;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -66,6 +67,8 @@
 import org.apache.calcite.rel.RelCollationImpl;
 import org.apache.calcite.rel.RelCollations;
 import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.RelFieldCollation.Direction;
+import org.apache.calcite.rel.RelFieldCollation.NullDirection;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Aggregate;
 import org.apache.calcite.rel.core.AggregateCall;
@@ -2916,7 +2919,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
    * top constraining Select
    * @throws SemanticException
    */
-  private Pair<RelNode, RelNode> genOBLogicalPlan(QB qb, RelNode srcRel, boolean outermostOB)
+  private Pair<RelNode, RelNode> genOBLogicalPlan(QB qb, RelNode srcRel, RelNode starSrcRel, boolean outermostOB)
       throws SemanticException {
     RelNode sortRel = null;
     RelNode originalOBChild = null;
@@ -2939,8 +2942,8 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
       // 2. Walk through OB exprs and extract field collations and additional
       // virtual columns needed
       final List<RexNode> newVCLst = new ArrayList<RexNode>();
-      final List<RelFieldCollation> fieldCollations = Lists.newArrayList();
-      int fieldIndex = 0;
+      final List<OrderByInfo> orderByInfos = Lists.newArrayList();
+      int innerIndex = 0;

       List<Node> obASTExprLst = obAST.getChildren();
       ASTNode obASTExpr;
@@ -2948,12 +2951,16 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
     List<Pair<ASTNode, TypeInfo>> vcASTTypePairs = new ArrayList<Pair<ASTNode, TypeInfo>>();
     RowResolver inputRR = relToHiveRR.get(srcRel);
     RowResolver outputRR = new RowResolver();
+    RowResolver starSrcRR = relToHiveRR.get(starSrcRel);
     RexNode rnd;
-    RexNodeConverter converter = new RexNodeConverter(cluster, srcRel.getRowType(),
-        relToHiveColNameCalcitePosMap.get(srcRel), 0, false);
+    RexNodeConverter converter = null;
+    RelNode input = null;
     int srcRelRecordSz = srcRel.getRowType().getFieldCount();
+    List<RexNode> needToAddRexNode = new ArrayList<>();
+    List<Pair<ASTNode, TypeInfo>> needToAddvcASTTypePairs = new ArrayList<Pair<ASTNode, TypeInfo>>();
+
     for (int i = 0; i < obASTExprLst.size(); i++) {
       // 2.1 Convert AST Expr to ExprNode
       obASTExpr = (ASTNode) obASTExprLst.get(i);
@@ -2962,21 +2969,51 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
       Map<ASTNode, ExprNodeDesc> astToExprNDescMap = TypeCheckProcFactory.genExprNode(
           obASTExpr, new TypeCheckCtx(inputRR));
       ExprNodeDesc obExprNDesc = astToExprNDescMap.get(ref);
-      if (obExprNDesc == null)
-        throw new SemanticException("Invalid order by expression: " + obASTExpr.toString());
+      boolean isFromStar = false;
+      if (obExprNDesc == null) {
+        // The OB expr does not resolve against the SELECT list, so retry
+        // against the star (FROM-side) schema.
+        Map<ASTNode, ExprNodeDesc> alternativeAstToExprNDescMap = TypeCheckProcFactory
+            .genExprNode(obASTExpr, new TypeCheckCtx(starSrcRR));
+        obExprNDesc = alternativeAstToExprNDescMap.get(ref);
+        if (obExprNDesc == null) {
+          throw new SemanticException("Invalid order by expression: " + obASTExpr.toString());
+        }
+        isFromStar = true;
+        if (srcRel instanceof HiveProject) {
+          // there should be only one input for select
+          input = srcRel.getInput(0);
+          converter = new RexNodeConverter(cluster, input.getRowType(),
+              relToHiveColNameCalcitePosMap.get(input), 0, false);
+        }
+      } else {
+        converter = new RexNodeConverter(cluster, srcRel.getRowType(),
+            relToHiveColNameCalcitePosMap.get(srcRel), 0, false);
+      }

       // 2.2 Convert ExprNode to RexNode
       rnd = converter.convert(obExprNDesc);
-
+      if (isFromStar) {
+        needToAddRexNode.add(rnd);
+      }
       // 2.3 Determine the index of ob expr in child schema
       // NOTE: Calcite can not take compound exprs in OB without it being
       // present in the child (& hence we add a child Project Rel)
-      if (rnd instanceof RexInputRef) {
-        fieldIndex = ((RexInputRef) rnd).getIndex();
+      OrderByType type = null;
+      if (isFromStar) {
+        innerIndex = needToAddvcASTTypePairs.size();
+        needToAddvcASTTypePairs
+            .add(new Pair<ASTNode, TypeInfo>(ref, obExprNDesc.getTypeInfo()));
+        type = OrderByType.STAR;
       } else {
-        fieldIndex = srcRelRecordSz + newVCLst.size();
-        newVCLst.add(rnd);
-        vcASTTypePairs.add(new Pair<ASTNode, TypeInfo>(ref, obExprNDesc.getTypeInfo()));
+        if (rnd instanceof RexInputRef) {
+          innerIndex = ((RexInputRef) rnd).getIndex();
+          type = OrderByType.REF;
+        } else {
+          innerIndex = newVCLst.size();
+          newVCLst.add(rnd);
+          vcASTTypePairs.add(new Pair<ASTNode, TypeInfo>(ref, obExprNDesc.getTypeInfo()));
+          type = OrderByType.VC;
+        }
       }

       // 2.4 Determine the Direction of order by
@@ -2995,27 +3032,41 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
       }

       // 2.5 Add to field collations
-      fieldCollations.add(new RelFieldCollation(fieldIndex, order, nullOrder));
+      orderByInfos.add(new OrderByInfo(type, innerIndex, order, nullOrder));
+    }
+
+    // Replace srcRel with a synthetic Select that appends the columns resolved
+    // from the star schema; the combined row is laid out as
+    // --RexInputRef--isFromStar--newVCLst--
+    RowResolver selSyntheticProjectRR = inputRR.duplicate();
+    RelNode selSyntheticRel = srcRel;
+    if (!needToAddRexNode.isEmpty()) {
+      List<RexNode> originalInputRefs = ((HiveProject) srcRel).getChildExps();
+      int vcolPos = inputRR.getRowSchema().getSignature().size();
+      for (Pair<ASTNode, TypeInfo> astTypePair : needToAddvcASTTypePairs) {
+        selSyntheticProjectRR.putExpression(astTypePair.getKey(), new ColumnInfo(
+            SemanticAnalyzer.getColumnInternalName(vcolPos), astTypePair.getValue(), null,
+            false));
+        vcolPos++;
+      }
+      selSyntheticRel = genSelectRelNode(CompositeList.of(originalInputRefs, needToAddRexNode),
+          selSyntheticProjectRR, input);
     }

     // 3. Add Child Project Rel if needed, Generate Output RR, input Sel Rel
     // for top constraining Sel
-    RelNode obInputRel = srcRel;
+    RelNode obInputRel = selSyntheticRel;
     if (!newVCLst.isEmpty()) {
-      List<RexNode> originalInputRefs = Lists.transform(srcRel.getRowType().getFieldList(),
+      List<RexNode> originalInputRefs = Lists.transform(selSyntheticRel.getRowType().getFieldList(),
           new Function<RelDataTypeField, RexNode>() {
             @Override
             public RexNode apply(RelDataTypeField input) {
               return new RexInputRef(input.getIndex(), input.getType());
             }
           });
-      RowResolver obSyntheticProjectRR = new RowResolver();
-      if (!RowResolver.add(obSyntheticProjectRR, inputRR)) {
-        throw new CalciteSemanticException(
-            "Duplicates detected when adding columns to RR: see previous message",
-            UnsupportedFeature.Duplicates_in_RR);
-      }
-      int vcolPos = inputRR.getRowSchema().getSignature().size();
+      RowResolver obSyntheticProjectRR = selSyntheticProjectRR.duplicate();
+      int vcolPos = selSyntheticProjectRR.getRowSchema().getSignature().size();
       for (Pair<ASTNode, TypeInfo> astTypePair : vcASTTypePairs) {
         obSyntheticProjectRR.putExpression(astTypePair.getKey(), new ColumnInfo(
             SemanticAnalyzer.getColumnInternalName(vcolPos), astTypePair.getValue(), null,
@@ -3023,34 +3074,34 @@ public RexNode apply(RelDataTypeField input) {
         vcolPos++;
       }
       obInputRel = genSelectRelNode(CompositeList.of(originalInputRefs, newVCLst),
-          obSyntheticProjectRR, srcRel);
-
-      if (outermostOB) {
-        if (!RowResolver.add(outputRR, inputRR)) {
-          throw new CalciteSemanticException(
-              "Duplicates detected when adding columns to RR: see previous message",
-              UnsupportedFeature.Duplicates_in_RR);
-        }
-
-      } else {
-        if (!RowResolver.add(outputRR, obSyntheticProjectRR)) {
-          throw new CalciteSemanticException(
-              "Duplicates detected when adding columns to RR: see previous message",
-              UnsupportedFeature.Duplicates_in_RR);
-        }
-        originalOBChild = srcRel;
+          obSyntheticProjectRR, selSyntheticRel);
+      if (!outermostOB) {
+        originalOBChild = selSyntheticRel;
       }
-    } else {
-      if (!RowResolver.add(outputRR, inputRR)) {
-        throw new CalciteSemanticException(
-            "Duplicates detected when adding columns to RR: see previous message",
-            UnsupportedFeature.Duplicates_in_RR);
-      }
-    }
+    }

     // 4. Construct SortRel
+    final List<RelFieldCollation> collations = Lists.newArrayList();
+    for (OrderByInfo info : orderByInfos) {
+      int globalIndex = 0;
+      switch (info.type) {
+      case REF:
+        globalIndex = info.innerIndex;
+        break;
+      case STAR:
+        globalIndex = info.innerIndex + srcRelRecordSz;
+        break;
+      case VC:
+        globalIndex = info.innerIndex + srcRelRecordSz + needToAddvcASTTypePairs.size();
+        break;
+      default:
+        break;
+      }
+      collations.add(new RelFieldCollation(globalIndex, info.direction, info.nullDirection));
+    }
+
     RelTraitSet traitSet = cluster.traitSetOf(HiveRelNode.CONVENTION);
-    RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
+    RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(collations));
     sortRel = new HiveSortLimit(cluster, traitSet, obInputRel, canonizedCollation, null, null);

     // 5. Update the maps
@@ -3059,10 +3110,27 @@ public RexNode apply(RelDataTypeField input) {
     // rowtype of sortrel is the type of it child; if child happens to be
     // synthetic project that we introduced then that projectrel would
     // contain the vc.
+    outputRR = inputRR.duplicate();
     ImmutableMap<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(
         outputRR, sortRel);
     relToHiveRR.put(sortRel, outputRR);
     relToHiveColNameCalcitePosMap.put(sortRel, hiveColNameCalcitePosMap);
+
+    // If sort-only columns were appended (star-resolved or virtual), project
+    // them back out so the consumer sees the original SELECT schema.
+    if (!needToAddRexNode.isEmpty() || !newVCLst.isEmpty()) {
+      List<RexNode> originalInputRefs = Lists.transform(obInputRel.getRowType().getFieldList(),
+          new Function<RelDataTypeField, RexNode>() {
+            @Override
+            public RexNode apply(RelDataTypeField input) {
+              return new RexInputRef(input.getIndex(), input.getType());
+            }
+          });
+      List<RexNode> selectedRefs = Lists.newArrayList();
+      for (int index = 0; index < originalInputRefs.size() - needToAddRexNode.size()
+          - newVCLst.size(); index++) {
+        selectedRefs.add(originalInputRefs.get(index));
+      }
+      sortRel = genSelectRelNode(selectedRefs, inputRR.duplicate(), sortRel);
+    }
   }

   return (new Pair<RelNode, RelNode>(sortRel, originalOBChild));
@@ -3864,7 +3932,7 @@ private RelNode genLogicalPlan(QB qb, boolean outerMostQB,
     srcRel = (selectRel == null) ? srcRel : selectRel;

     // 6. Build Rel for OB Clause
-    Pair<RelNode, RelNode> obTopProjPair = genOBLogicalPlan(qb, srcRel, outerMostQB);
+    Pair<RelNode, RelNode> obTopProjPair = genOBLogicalPlan(qb, srcRel, starSrcRel, outerMostQB);
     obRel = obTopProjPair.getKey();
     RelNode topConstrainingProjArgsRel = obTopProjPair.getValue();
     srcRel = (obRel == null) ? srcRel : obRel;
@@ -4063,4 +4131,25 @@ private QBParseInfo getQBParseInfo(QB qb) throws CalciteSemanticException {
     NATIVE
   }

+  private enum OrderByType {
+    REF,
+    STAR,
+    VC
+  }
+
+  // A sort key: the group it lives in (type), its index within that group
+  // (innerIndex), and its collation.
+  private class OrderByInfo {
+    OrderByType type;
+    int innerIndex;
+    Direction direction;
+    NullDirection nullDirection;
+
+    public OrderByInfo(OrderByType type, int innerIndex, Direction direction,
+        NullDirection nullDirection) {
+      this.type = type;
+      this.innerIndex = innerIndex;
+      this.direction = direction;
+      this.nullDirection = nullDirection;
+    }
+  }
+
 }
diff --git a/ql/src/test/queries/clientpositive/explainuser_1.q b/ql/src/test/queries/clientpositive/explainuser_1.q
index a6fbb54..d7f01d1 100644
--- a/ql/src/test/queries/clientpositive/explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/explainuser_1.q
@@ -85,6 +85,8 @@ explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_
 explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5;
 explain select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5;
+explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c limit 5;
+
 explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc
limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5; explain select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0); diff --git a/ql/src/test/queries/clientpositive/order_by_expr.q b/ql/src/test/queries/clientpositive/order_by_expr.q new file mode 100644 index 0000000..829cef6 --- /dev/null +++ b/ql/src/test/queries/clientpositive/order_by_expr.q @@ -0,0 +1,37 @@ +set hive.fetch.task.conversion=none; + +create table t(a int, b int); + +insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9); + +explain +select + interval '2-2' year to month + interval '3-3' year to month, + interval '2-2' year to month - interval '3-3' year to month +from t +order by interval '2-2' year to month + interval '3-3' year to month +limit 2; + +select a,b, count(*) from t group by a, b order by a+b; +select a,b,count(*),a+b from t group by a, b order by a+b; +select a,b from t order by a+b; +select a,b,a+b from t order by a+b; +select a,b,a+b from t order by a+b desc; +select cast(0.99999999999999999999 as decimal(20,19)) as c from t limit 1; +select cast(0.99999999999999999999 as decimal(20,19)) as c from t order by c limit 1; +select a from t order by b; +select a from t order by 0-b; +select b from t order by 0-b; +select b from t order by a, 0-b; +select b from t order by a+1, 0-b; +select b from t order by 0-b, a+1; +explain select b from t order by 0-b, a+1; +select a,b from t order by 0-b; +select a,b from t order by a, a+1, 0-b; +select a,b from t order by 0-b, a+1; +select a+1,b from t order by a, a+1, 0-b; +select a+1 as c, b from t order by a, a+1, 0-b; +select a, a+1 as c, b from t order by a, a+1, 0-b; +select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b; +explain select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b; +select a, a+1 as c, b, 2*b from t order by c+1, 0-b; diff --git a/ql/src/test/results/clientpositive/cp_sel.q.out b/ql/src/test/results/clientpositive/cp_sel.q.out index 6300578..4ab661a 100644 --- a/ql/src/test/results/clientpositive/cp_sel.q.out +++ b/ql/src/test/results/clientpositive/cp_sel.q.out @@ -5,46 +5,24 @@ POSTHOOK: query: explain select key,value,'hello' as ds, 'world' as hr from srcpart where hr=11 order by 1 limit 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), 'hello' (type: string), 'world' (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 1 - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: 
NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: 1 Processor Tree: - ListSink + TableScan + alias: srcpart + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), 'hello' (type: string), 'world' (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key,value,'hello' as ds, 'world' as hr from srcpart where hr=11 order by 1 limit 1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/decimal_stats.q.out b/ql/src/test/results/clientpositive/decimal_stats.q.out index 5af58fb..580acd2 100644 --- a/ql/src/test/results/clientpositive/decimal_stats.q.out +++ b/ql/src/test/results/clientpositive/decimal_stats.q.out @@ -54,46 +54,24 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select * from decimal_1 order by 1 limit 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: decimal_1 - Statistics: Num rows: 500 Data size: 112112 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: t (type: decimal(4,2)), u (type: decimal(5,0)), v (type: decimal(10,0)) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 112112 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 500 Data size: 112112 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: decimal(4,2)), _col1 (type: decimal(5,0)), _col2 (type: decimal(10,0)) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: decimal(4,2)), VALUE._col1 (type: decimal(5,0)), VALUE._col2 (type: decimal(10,0)) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 112112 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 100 - Statistics: Num rows: 100 Data size: 22512 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 100 Data size: 22512 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: 100 Processor Tree: - ListSink + TableScan + alias: decimal_1 + Statistics: Num rows: 500 Data size: 112112 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: t (type: decimal(4,2)), u (type: decimal(5,0)), v (type: decimal(10,0)) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 112112 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 100 + Statistics: Num rows: 100 Data size: 22512 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: 
query: drop table decimal_1 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out index 62f40cd..60a7d59 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out @@ -322,8 +322,8 @@ STAGE PLANS: predicate: ((UDFToInteger(grouping(_col2, 1)) = 1) or (UDFToInteger(grouping(_col2, 0)) = 1)) (type: boolean) Statistics: Num rows: 6 Data size: 60 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1) + grouping(_col2, 0)) (type: tinyint) - outputColumnNames: _col0, _col1, _col2 + expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1) + grouping(_col2, 0)) (type: tinyint), CASE WHEN (((grouping(_col2, 1) + grouping(_col2, 0)) = 1)) THEN (_col0) ELSE (null) END (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 60 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -337,7 +337,7 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col2 (type: tinyint), CASE WHEN ((_col2 = 1)) THEN (_col0) ELSE (null) END (type: int) + key expressions: _col2 (type: tinyint), _col3 (type: int) sort order: -+ Statistics: Num rows: 6 Data size: 60 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int) diff --git a/ql/src/test/results/clientpositive/llap/cbo_limit.q.out b/ql/src/test/results/clientpositive/llap/cbo_limit.q.out index c582578..85458a4 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_limit.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_limit.q.out @@ -66,8 +66,8 @@ POSTHOOK: Input: default@cbo_t2 POSTHOOK: Input: default@cbo_t2@dt=2014 POSTHOOK: Input: default@cbo_t3 #### A masked pattern was here #### -1 12 6 1 2 6 +1 12 6 PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int, c desc limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out index c582578..85458a4 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out @@ -66,8 +66,8 @@ POSTHOOK: Input: default@cbo_t2 POSTHOOK: Input: default@cbo_t2@dt=2014 POSTHOOK: Input: default@cbo_t3 #### A masked pattern was here #### -1 12 6 1 2 6 +1 12 6 PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, 
sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int, c desc limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out index c3f8071..3354941 100644 --- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out +++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out @@ -359,13 +359,13 @@ Stage-0 limit:-1 Stage-1 Reducer 7 llap - File Output Operator [FS_42] + File Output Operator [FS_43] Select Operator [SEL_41] (rows=1 width=20) Output:["_col0","_col1","_col2"] <-Reducer 6 [SIMPLE_EDGE] llap SHUFFLE [RS_40] - Select Operator [SEL_38] (rows=1 width=20) - Output:["_col0","_col1","_col2"] + Select Operator [SEL_38] (rows=1 width=28) + Output:["_col0","_col1","_col2","_col3"] Group By Operator [GBY_37] (rows=1 width=20) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 5 [SIMPLE_EDGE] llap @@ -377,14 +377,14 @@ Stage-0 Output:["_col2","_col6"] Filter Operator [FIL_33] (rows=3 width=16) predicate:((_col1 > 0) or (_col6 >= 0)) - Merge Join Operator [MERGEJOIN_52] (rows=3 width=16) + Merge Join Operator [MERGEJOIN_53] (rows=3 width=16) Conds:RS_30._col0=RS_31._col0(Inner),Output:["_col1","_col2","_col6"] <-Map 11 [SIMPLE_EDGE] llap SHUFFLE [RS_31] PartitionCols:_col0 Select Operator [SEL_29] (rows=18 width=84) Output:["_col0","_col1"] - Filter Operator [FIL_50] (rows=18 width=84) + Filter Operator [FIL_51] (rows=18 width=84) predicate:key is not null TableScan [TS_27] (rows=20 width=84) default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"] @@ -395,7 +395,7 @@ Stage-0 Output:["_col0","_col1","_col2"] Filter Operator [FIL_25] (rows=1 width=101) predicate:((_col1 + _col4) >= 0) - Merge Join Operator [MERGEJOIN_51] (rows=1 width=101) + Merge Join Operator [MERGEJOIN_52] (rows=1 width=101) Conds:RS_22._col0=RS_23._col0(Inner),Output:["_col0","_col1","_col2","_col4"] <-Reducer 10 [SIMPLE_EDGE] llap SHUFFLE [RS_23] @@ -413,7 +413,7 @@ Stage-0 PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_14] (rows=2 width=101) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float - Filter Operator [FIL_49] (rows=5 width=93) + Filter Operator [FIL_50] (rows=5 width=93) predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and key is not null) TableScan [TS_11] (rows=20 width=88) default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] @@ -433,7 +433,7 @@ Stage-0 PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_3] (rows=2 width=101) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float - Filter Operator [FIL_48] (rows=5 width=93) + 
Filter Operator [FIL_49] (rows=5 width=93) predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and key is not null) TableScan [TS_0] (rows=20 width=88) default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] @@ -458,78 +458,80 @@ Stage-0 limit:-1 Stage-1 Reducer 7 llap - File Output Operator [FS_39] + File Output Operator [FS_40] Select Operator [SEL_38] (rows=1 width=20) Output:["_col0","_col1","_col2"] <-Reducer 6 [SIMPLE_EDGE] llap SHUFFLE [RS_37] - Group By Operator [GBY_34] (rows=1 width=20) - Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 - <-Reducer 5 [SIMPLE_EDGE] llap - SHUFFLE [RS_33] - PartitionCols:_col0, _col1 - Group By Operator [GBY_32] (rows=1 width=20) - Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col6, _col2 - Select Operator [SEL_31] (rows=1 width=16) - Output:["_col6","_col2"] - Filter Operator [FIL_30] (rows=1 width=16) - predicate:(((_col1 > 0) or (_col6 >= 0)) and ((_col6 >= 1) or (_col2 >= 1)) and ((UDFToLong(_col6) + _col2) >= 0)) - Merge Join Operator [MERGEJOIN_48] (rows=3 width=16) - Conds:RS_27._col0=RS_28._col0(Inner),Output:["_col1","_col2","_col6"] - <-Map 10 [SIMPLE_EDGE] llap - SHUFFLE [RS_28] - PartitionCols:_col0 - Select Operator [SEL_26] (rows=18 width=84) - Output:["_col0","_col1"] - Filter Operator [FIL_46] (rows=18 width=84) - predicate:((c_int > 0) and key is not null) - TableScan [TS_24] (rows=20 width=84) - default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"] - <-Reducer 4 [SIMPLE_EDGE] llap - SHUFFLE [RS_27] - PartitionCols:_col0 - Select Operator [SEL_23] (rows=1 width=101) - Output:["_col0","_col1","_col2"] - Filter Operator [FIL_22] (rows=1 width=101) - predicate:((_col1 + _col4) >= 0) - Merge Join Operator [MERGEJOIN_47] (rows=1 width=101) - Conds:RS_19._col0=RS_20._col0(Left Outer),Output:["_col0","_col1","_col2","_col4"] - <-Reducer 3 [SIMPLE_EDGE] llap - SHUFFLE [RS_19] - PartitionCols:_col0 - Select Operator [SEL_9] (rows=1 width=97) - Output:["_col0","_col1","_col2"] - <-Reducer 2 [SIMPLE_EDGE] llap - SHUFFLE [RS_8] - Select Operator [SEL_6] (rows=1 width=105) - Output:["_col0","_col1","_col2","_col3"] - Group By Operator [GBY_5] (rows=1 width=101) - Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 - <-Map 1 [SIMPLE_EDGE] llap - SHUFFLE [RS_4] - PartitionCols:_col0, _col1, _col2 - Group By Operator [GBY_3] (rows=1 width=101) - Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float - Filter Operator [FIL_44] (rows=1 width=93) - predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null) - TableScan [TS_0] (rows=20 width=88) - default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] - <-Reducer 9 [SIMPLE_EDGE] llap - SHUFFLE [RS_20] - PartitionCols:_col0 - Select Operator [SEL_17] (rows=1 width=89) - Output:["_col0","_col1"] - Group By Operator [GBY_16] (rows=1 width=93) - Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2 - <-Map 8 [SIMPLE_EDGE] llap - SHUFFLE [RS_15] - PartitionCols:_col0, _col1, _col2 - Group By Operator [GBY_14] (rows=1 width=93) - Output:["_col0","_col1","_col2"],keys:key, c_int, c_float - Filter Operator [FIL_45] (rows=1 width=93) - predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int 
>= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null) - TableScan [TS_11] (rows=20 width=88) - default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] + Select Operator [SEL_35] (rows=1 width=28) + Output:["_col0","_col1","_col2","_col3"] + Group By Operator [GBY_34] (rows=1 width=20) + Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 + <-Reducer 5 [SIMPLE_EDGE] llap + SHUFFLE [RS_33] + PartitionCols:_col0, _col1 + Group By Operator [GBY_32] (rows=1 width=20) + Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col6, _col2 + Select Operator [SEL_31] (rows=1 width=16) + Output:["_col6","_col2"] + Filter Operator [FIL_30] (rows=1 width=16) + predicate:(((_col1 > 0) or (_col6 >= 0)) and ((_col6 >= 1) or (_col2 >= 1)) and ((UDFToLong(_col6) + _col2) >= 0)) + Merge Join Operator [MERGEJOIN_49] (rows=3 width=16) + Conds:RS_27._col0=RS_28._col0(Inner),Output:["_col1","_col2","_col6"] + <-Map 10 [SIMPLE_EDGE] llap + SHUFFLE [RS_28] + PartitionCols:_col0 + Select Operator [SEL_26] (rows=18 width=84) + Output:["_col0","_col1"] + Filter Operator [FIL_47] (rows=18 width=84) + predicate:((c_int > 0) and key is not null) + TableScan [TS_24] (rows=20 width=84) + default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"] + <-Reducer 4 [SIMPLE_EDGE] llap + SHUFFLE [RS_27] + PartitionCols:_col0 + Select Operator [SEL_23] (rows=1 width=101) + Output:["_col0","_col1","_col2"] + Filter Operator [FIL_22] (rows=1 width=101) + predicate:((_col1 + _col4) >= 0) + Merge Join Operator [MERGEJOIN_48] (rows=1 width=101) + Conds:RS_19._col0=RS_20._col0(Left Outer),Output:["_col0","_col1","_col2","_col4"] + <-Reducer 3 [SIMPLE_EDGE] llap + SHUFFLE [RS_19] + PartitionCols:_col0 + Select Operator [SEL_9] (rows=1 width=97) + Output:["_col0","_col1","_col2"] + <-Reducer 2 [SIMPLE_EDGE] llap + SHUFFLE [RS_8] + Select Operator [SEL_6] (rows=1 width=105) + Output:["_col0","_col1","_col2","_col3"] + Group By Operator [GBY_5] (rows=1 width=101) + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 + <-Map 1 [SIMPLE_EDGE] llap + SHUFFLE [RS_4] + PartitionCols:_col0, _col1, _col2 + Group By Operator [GBY_3] (rows=1 width=101) + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float + Filter Operator [FIL_45] (rows=1 width=93) + predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null) + TableScan [TS_0] (rows=20 width=88) + default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] + <-Reducer 9 [SIMPLE_EDGE] llap + SHUFFLE [RS_20] + PartitionCols:_col0 + Select Operator [SEL_17] (rows=1 width=89) + Output:["_col0","_col1"] + Group By Operator [GBY_16] (rows=1 width=93) + Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2 + <-Map 8 [SIMPLE_EDGE] llap + SHUFFLE [RS_15] + PartitionCols:_col0, _col1, _col2 + Group By Operator [GBY_14] (rows=1 width=93) + Output:["_col0","_col1","_col2"],keys:key, c_int, c_float + Filter Operator [FIL_46] (rows=1 width=93) + predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null) + TableScan [TS_11] (rows=20 width=88) + 
default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c PREHOOK: type: QUERY @@ -1411,6 +1413,111 @@ Stage-0 TableScan [TS_0] (rows=20 width=84) default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"] +PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c limit 5 +PREHOOK: type: QUERY +POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c limit 5 +POSTHOOK: type: QUERY +Plan optimized by CBO. 
+ +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +Reducer 4 <- Reducer 3 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) +Reducer 5 <- Map 10 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE) +Reducer 6 <- Reducer 5 (SIMPLE_EDGE) +Reducer 8 <- Map 7 (SIMPLE_EDGE) +Reducer 9 <- Reducer 8 (SIMPLE_EDGE) + +Stage-0 + Fetch Operator + limit:5 + Stage-1 + Reducer 6 llap + File Output Operator [FS_46] + Limit [LIM_45] (rows=1 width=20) + Number of rows:5 + Select Operator [SEL_44] (rows=1 width=20) + Output:["_col0","_col1","_col2"] + Group By Operator [GBY_43] (rows=1 width=20) + Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 + <-Reducer 5 [SIMPLE_EDGE] llap + SHUFFLE [RS_42] + PartitionCols:_col0, _col1 + Group By Operator [GBY_41] (rows=1 width=20) + Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col2, _col6 + Select Operator [SEL_40] (rows=3 width=16) + Output:["_col2","_col6"] + Filter Operator [FIL_39] (rows=3 width=16) + predicate:((_col1 > 0) or (_col6 >= 0)) + Merge Join Operator [MERGEJOIN_58] (rows=3 width=16) + Conds:RS_36._col0=RS_37._col0(Inner),Output:["_col1","_col2","_col6"] + <-Map 10 [SIMPLE_EDGE] llap + SHUFFLE [RS_37] + PartitionCols:_col0 + Select Operator [SEL_35] (rows=18 width=84) + Output:["_col0","_col1"] + Filter Operator [FIL_56] (rows=18 width=84) + predicate:key is not null + TableScan [TS_33] (rows=20 width=84) + default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"] + <-Reducer 4 [SIMPLE_EDGE] llap + SHUFFLE [RS_36] + PartitionCols:_col0 + Select Operator [SEL_32] (rows=1 width=101) + Output:["_col0","_col1","_col2"] + Filter Operator [FIL_31] (rows=1 width=101) + predicate:((_col1 + _col4) >= 0) + Merge Join Operator [MERGEJOIN_57] (rows=2 width=101) + Conds:RS_28._col0=RS_29._col0(Inner),Output:["_col0","_col1","_col2","_col4"] + <-Reducer 3 [SIMPLE_EDGE] llap + SHUFFLE [RS_28] + PartitionCols:_col0 + Filter Operator [FIL_12] (rows=2 width=97) + predicate:_col0 is not null + Limit [LIM_10] (rows=3 width=97) + Number of rows:5 + Select Operator [SEL_9] (rows=3 width=97) + Output:["_col0","_col1","_col2"] + <-Reducer 2 [SIMPLE_EDGE] llap + SHUFFLE [RS_8] + Select Operator [SEL_6] (rows=3 width=97) + Output:["_col0","_col1","_col2"] + Group By Operator [GBY_5] (rows=3 width=101) + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 + <-Map 1 [SIMPLE_EDGE] llap + SHUFFLE [RS_4] + PartitionCols:_col0, _col1, _col2 + Group By Operator [GBY_3] (rows=3 width=101) + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float + Filter Operator [FIL_53] (rows=6 width=93) + predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0))) + TableScan [TS_0] (rows=20 width=88) + default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] + <-Reducer 9 [SIMPLE_EDGE] llap + SHUFFLE [RS_29] + PartitionCols:_col0 + Filter Operator [FIL_26] (rows=2 width=105) + predicate:_col0 is not null + Limit [LIM_24] (rows=3 width=105) + Number of rows:5 + Select Operator [SEL_23] (rows=3 width=105) + Output:["_col0","_col1"] + <-Reducer 8 [SIMPLE_EDGE] llap + SHUFFLE [RS_22] + Select Operator [SEL_20] (rows=3 width=105) + Output:["_col0","_col1","_col2","_col3"] + Group By Operator [GBY_19] (rows=3 width=101) + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 + <-Map 7 [SIMPLE_EDGE] llap + SHUFFLE 
[RS_18] + PartitionCols:_col0, _col1, _col2 + Group By Operator [GBY_17] (rows=3 width=101) + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float + Filter Operator [FIL_55] (rows=6 width=93) + predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0))) + TableScan [TS_14] (rows=20 width=88) + default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] + PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 @@ -1429,18 +1536,18 @@ Reducer 9 <- Map 8 (SIMPLE_EDGE) Stage-0 Fetch Operator - limit:5 + limit:-1 Stage-1 Reducer 7 llap - File Output Operator [FS_49] - Limit [LIM_48] (rows=1 width=20) + File Output Operator [FS_50] + Limit [LIM_48] (rows=1 width=28) Number of rows:5 - Select Operator [SEL_47] (rows=1 width=20) + Select Operator [SEL_47] (rows=1 width=28) Output:["_col0","_col1","_col2"] <-Reducer 6 [SIMPLE_EDGE] llap SHUFFLE [RS_46] - Select Operator [SEL_44] (rows=1 width=20) - Output:["_col0","_col1","_col2"] + Select Operator [SEL_44] (rows=1 width=28) + Output:["_col0","_col1","_col2","_col3"] Group By Operator [GBY_43] (rows=1 width=20) Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 5 [SIMPLE_EDGE] llap @@ -1452,14 +1559,14 @@ Stage-0 Output:["_col2","_col6"] Filter Operator [FIL_39] (rows=3 width=16) predicate:((_col1 > 0) or (_col6 >= 0)) - Merge Join Operator [MERGEJOIN_61] (rows=3 width=16) + Merge Join Operator [MERGEJOIN_62] (rows=3 width=16) Conds:RS_36._col0=RS_37._col0(Inner),Output:["_col1","_col2","_col6"] <-Map 11 [SIMPLE_EDGE] llap SHUFFLE [RS_37] PartitionCols:_col0 Select Operator [SEL_35] (rows=18 width=84) Output:["_col0","_col1"] - Filter Operator [FIL_59] (rows=18 width=84) + Filter Operator [FIL_60] (rows=18 width=84) predicate:key is not null TableScan [TS_33] (rows=20 width=84) default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"] @@ -1470,7 +1577,7 @@ Stage-0 Output:["_col0","_col1","_col2"] Filter Operator [FIL_31] (rows=1 width=101) predicate:((_col1 + _col4) >= 0) - Merge Join Operator [MERGEJOIN_60] (rows=2 width=101) + Merge Join Operator [MERGEJOIN_61] (rows=2 width=101) Conds:RS_28._col0=RS_29._col0(Inner),Output:["_col0","_col1","_col2","_col4"] <-Reducer 10 [SIMPLE_EDGE] llap 
SHUFFLE [RS_29] @@ -1492,7 +1599,7 @@ Stage-0 PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_17] (rows=3 width=101) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float - Filter Operator [FIL_58] (rows=6 width=93) + Filter Operator [FIL_59] (rows=6 width=93) predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0))) TableScan [TS_14] (rows=20 width=88) default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] @@ -1516,7 +1623,7 @@ Stage-0 PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_3] (rows=3 width=101) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float - Filter Operator [FIL_56] (rows=6 width=93) + Filter Operator [FIL_57] (rows=6 width=93) predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0))) TableScan [TS_0] (rows=20 width=88) default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out index 144356c..4e6cbdf 100644 --- a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out @@ -1047,49 +1047,19 @@ POSTHOOK: query: explain select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: decimal_2 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: all inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: 3.14 (type: decimal(4,2)) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: decimal_2 + Select Operator + expressions: 3.14 (type: decimal(4,2)) + outputColumnNames: _col0 + ListSink PREHOOK: query: select cast(3.14 as decimal(4,2)) as c from decimal_2 order by c PREHOOK: type: QUERY @@ -1107,49 +1077,19 @@ POSTHOOK: query: explain select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: decimal_2 - Statistics: Num rows: 1 Data size: 112 Basic 
stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: all inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: 3.14 (type: decimal(4,2)) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: decimal_2 + Select Operator + expressions: 3.14 (type: decimal(4,2)) + outputColumnNames: _col0 + ListSink PREHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) as c from decimal_2 order by c PREHOOK: type: QUERY @@ -1167,49 +1107,19 @@ POSTHOOK: query: explain select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: decimal_2 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: all inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: 1355944339.1234567 (type: decimal(30,8)) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: decimal_2 + Select Operator + expressions: 1355944339.1234567 (type: decimal(30,8)) + outputColumnNames: _col0 + ListSink PREHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) as c from decimal_2 order by c PREHOOK: type: QUERY @@ -1219,7 +1129,7 @@ POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_2 #### A masked pattern was here #### -1355944339.12345670 +1355944339.1234567 PREHOOK: query: explain select cast(true as decimal) as c from decimal_2 order by c PREHOOK: type: QUERY @@ -1227,49 +1137,19 @@ POSTHOOK: query: explain select cast(true as decimal) as c from decimal_2 order by c POSTHOOK: type: QUERY STAGE 
DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: decimal_2 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: all inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: 1 (type: decimal(10,0)) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: decimal_2 + Select Operator + expressions: 1 (type: decimal(10,0)) + outputColumnNames: _col0 + ListSink PREHOOK: query: explain select cast(true as decimal) as c from decimal_2 order by c @@ -1278,49 +1158,19 @@ POSTHOOK: query: explain select cast(true as decimal) as c from decimal_2 order by c POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: decimal_2 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: vectorized, llap - LLAP IO: all inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: 1 (type: decimal(10,0)) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: decimal_2 + Select Operator + expressions: 1 (type: decimal(10,0)) + outputColumnNames: _col0 + ListSink PREHOOK: query: select cast(true as decimal) as c from decimal_2 order by c PREHOOK: type: QUERY @@ -1338,49 +1188,19 @@ POSTHOOK: query: explain select cast(3Y as decimal) as c from decimal_2 order by c POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - 
Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_2
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: 3 (type: decimal(10,0))
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: decimal_2
+          Select Operator
+            expressions: 3 (type: decimal(10,0))
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: select cast(3Y as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1398,49 +1218,19 @@ POSTHOOK: query: explain
 select cast(3S as decimal) as c from decimal_2 order by c
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_2
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: 3 (type: decimal(10,0))
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: decimal_2
+          Select Operator
+            expressions: 3 (type: decimal(10,0))
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: select cast(3S as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1458,49 +1248,19 @@ POSTHOOK: query: explain
 select cast(cast(3 as int) as decimal) as c from decimal_2 order by c
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_2
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: 3 (type: decimal(10,0))
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: decimal_2
+          Select Operator
+            expressions: 3 (type: decimal(10,0))
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: select cast(cast(3 as int) as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1518,49 +1278,19 @@ POSTHOOK: query: explain
 select cast(3L as decimal) as c from decimal_2 order by c
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_2
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: 3 (type: decimal(10,0))
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
      Processor Tree:
-        ListSink
+        TableScan
+          alias: decimal_2
+          Select Operator
+            expressions: 3 (type: decimal(10,0))
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: select cast(3L as decimal) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1578,49 +1308,19 @@ POSTHOOK: query: explain
 select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_2
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: 1 (type: decimal(20,19))
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: decimal_2
+          Select Operator
+            expressions: 1 (type: decimal(20,19))
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1630,7 +1330,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_2
 #### A masked pattern was here ####
-1.0000000000000000000
+1
 PREHOOK: query: explain
 select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c
 PREHOOK: type: QUERY
@@ -1638,49 +1338,19 @@ POSTHOOK: query: explain
 select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_2
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: 0.99999999999999999999 (type: decimal(20,20))
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: decimal_2
+          Select Operator
+            expressions: 0.99999999999999999999 (type: decimal(20,20))
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
index 8e7cd63..3e89555 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
@@ -116,11 +116,11 @@ STAGE PLANS:
                   alias: decimal_tbl_txt
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: dec (type: decimal(10,0))
-                    outputColumnNames: _col0
+                    expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, -1) (type: decimal(11,0))
+                      key expressions: _col2 (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -270,11 +270,11 @@ STAGE PLANS:
                   alias: decimal_tbl_rc
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: dec (type: decimal(10,0))
-                    outputColumnNames: _col0
+                    expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, -1) (type: decimal(11,0))
+                      key expressions: _col2 (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -424,11 +424,11 @@ STAGE PLANS:
                   alias: decimal_tbl_orc
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: dec (type: decimal(10,0))
-                    outputColumnNames: _col0
+                    expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, -1) (type: decimal(11,0))
+                      key expressions: _col2 (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
index 391c775..1d8f609 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
@@ -501,9 +501,6 @@ STAGE PLANS:
   Stage: Stage-1
     Tez
 #### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -511,31 +508,21 @@ STAGE PLANS:
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      key expressions: CAST( 5-5 AS INTERVAL YEAR TO MONTH) (type: interval_year_month)
-                      sort order: +
-                      Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
-                      TopN Hash Memory Usage: 0.1
+                    Limit
+                      Number of rows: 2
+                      Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 2
-                  Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/order_by_expr.q.out b/ql/src/test/results/clientpositive/order_by_expr.q.out
new file mode 100644
index 0000000..4ddffe9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/order_by_expr.q.out
@@ -0,0 +1,489 @@
+PREHOOK: query: create table t(a int, b int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: create table t(a int, b int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@t
+POSTHOOK: query: insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@t
+POSTHOOK: Lineage: t.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: t.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+select
+  interval '2-2' year to month + interval '3-3' year to month,
+  interval '2-2' year to month - interval '3-3' year to month
+from t
+order by interval '2-2' year to month + interval '3-3' year to month
+limit 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  interval '2-2' year to month + interval '3-3' year to month,
+  interval '2-2' year to month - interval '3-3' year to month
+from t
+order by interval '2-2' year to month + interval '3-3' year to month
+limit 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: t
+            Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 9 Data size: 144 Basic stats: COMPLETE Column stats: COMPLETE
+              Limit
+                Number of rows: 2
+                Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 2
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a,b, count(*) from t group by a, b order by a+b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b, count(*) from t group by a, b order by a+b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 100 1
+20 -100 1
+1 2 2
+1 3 1
+2 4 1
+4 5 1
+3 7 1
+8 9 1
+PREHOOK: query: select a,b,count(*),a+b from t group by a, b order by a+b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b,count(*),a+b from t group by a, b order by a+b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 100 1 -900
+20 -100 1 -80
+1 2 2 3
+1 3 1 4
+2 4 1 6
+4 5 1 9
+3 7 1 10
+8 9 1 17
+PREHOOK: query: select a,b from t order by a+b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from t order by a+b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 100
+20 -100
+1 2
+1 2
+1 3
+2 4
+4 5
+3 7
+8 9
+PREHOOK: query: select a,b,a+b from t order by a+b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b,a+b from t order by a+b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 100 -900
+20 -100 -80
+1 2 3
+1 2 3
+1 3 4
+2 4 6
+4 5 9
+3 7 10
+8 9 17
+PREHOOK: query: select a,b,a+b from t order by a+b desc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b,a+b from t order by a+b desc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+8 9 17
+3 7 10
+4 5 9
+2 4 6
+1 3 4
+1 2 3
+1 2 3
+20 -100 -80
+-1000 100 -900
+PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+1.0000000000000000000
+PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t order by c limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t order by c limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+1.0000000000000000000
+PREHOOK: query: select a from t order by b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a from t order by b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+20
+1
+1
+1
+2
+4
+3
+8
+-1000
+PREHOOK: query: select a from t order by 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a from t order by 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000
+8
+3
+4
+2
+1
+1
+1
+20
+PREHOOK: query: select b from t order by 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select b from t order by 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+100
+9
+7
+5
+4
+3
+2
+2
+-100
+PREHOOK: query: select b from t order by a, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select b from t order by a, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+100
+3
+2
+2
+4
+7
+5
+9
+-100
+PREHOOK: query: select b from t order by a+1, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select b from t order by a+1, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+100
+3
+2
+2
+4
+7
+5
+9
+-100
+PREHOOK: query: select b from t order by 0-b, a+1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select b from t order by 0-b, a+1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+100
+9
+7
+5
+4
+3
+2
+2
+-100
+PREHOOK: query: explain select b from t order by 0-b, a+1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select b from t order by 0-b, a+1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: t
+            Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: b (type: int), (a + 1) (type: int), (0 - b) (type: int)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col2 (type: int), _col1 (type: int)
+                sort order: ++
+                Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: int)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a,b from t order by 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from t order by 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 100
+8 9
+3 7
+4 5
+2 4
+1 3
+1 2
+1 2
+20 -100
+PREHOOK: query: select a,b from t order by a, a+1, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from t order by a, a+1, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 100
+1 3
+1 2
+1 2
+2 4
+3 7
+4 5
+8 9
+20 -100
+PREHOOK: query: select a,b from t order by 0-b, a+1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from t order by 0-b, a+1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 100
+8 9
+3 7
+4 5
+2 4
+1 3
+1 2
+1 2
+20 -100
+PREHOOK: query: select a+1,b from t order by a, a+1, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a+1,b from t order by a, a+1, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-999 100
+2 3
+2 2
+2 2
+3 4
+4 7
+5 5
+9 9
+21 -100
+PREHOOK: query: select a+1 as c, b from t order by a, a+1, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a+1 as c, b from t order by a, a+1, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-999 100
+2 3
+2 2
+2 2
+3 4
+4 7
+5 5
+9 9
+21 -100
+PREHOOK: query: select a, a+1 as c, b from t order by a, a+1, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a, a+1 as c, b from t order by a, a+1, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 -999 100
+1 2 3
+1 2 2
+1 2 2
+2 3 4
+3 4 7
+4 5 5
+8 9 9
+20 21 -100
+PREHOOK: query: select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 -999 100 200
+1 2 3 6
+1 2 2 4
+1 2 2 4
+2 3 4 8
+3 4 7 14
+4 5 5 10
+8 9 9 18
+20 21 -100 -200
+PREHOOK: query: explain select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: t
+            Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: a (type: int), b (type: int), (2 * b) (type: int), (a + 1) (type: int), (0 - b) (type: int)
+              outputColumnNames: _col0, _col2, _col3, _col4, _col5
+              Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: int), _col4 (type: int), _col5 (type: int)
+                sort order: +++
+                Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col2 (type: int), _col3 (type: int)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: int), VALUE._col1 (type: int)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a, a+1 as c, b, 2*b from t order by c+1, 0-b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select a, a+1 as c, b, 2*b from t order by c+1, 0-b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+-1000 -999 100 200
+1 2 3 6
+1 2 2 4
+1 2 2 4
+2 3 4 8
+3 4 7 14
+4 5 5 10
+8 9 9 18
+20 21 -100 -200
diff --git a/ql/src/test/results/clientpositive/perf/query36.q.out b/ql/src/test/results/clientpositive/perf/query36.q.out
index b356628..ca70e41 100644
--- a/ql/src/test/results/clientpositive/perf/query36.q.out
+++ b/ql/src/test/results/clientpositive/perf/query36.q.out
@@ -68,10 +68,10 @@ Reducer 7 <- Reducer 6 (SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
-    limit:100
+    limit:-1
     Stage-1
       Reducer 7
-      File Output Operator [FS_35]
+      File Output Operator [FS_36]
         Limit [LIM_34] (rows=100 width=88)
           Number of rows:100
           Select Operator [SEL_33] (rows=1149975358 width=88)
@@ -79,7 +79,7 @@ Stage-0
           <-Reducer 6 [SIMPLE_EDGE]
             SHUFFLE [RS_32]
               Select Operator [SEL_30] (rows=1149975358 width=88)
-                Output:["_col0","_col1","_col2","_col3","_col4"]
+                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                 PTF Operator [PTF_29] (rows=1149975358 width=88)
                   Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(_col4 / _col5) ASC NULLS FIRST","partition by:":"(grouping(_col6, 1) + grouping(_col6, 0)), CASE WHEN ((UDFToInteger(grouping(_col6, 0)) = 0)) THEN (_col0) ELSE (null) END"}]
                   Select Operator [SEL_28] (rows=1149975358 width=88)
@@ -98,42 +98,42 @@ Stage-0
                         Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col0, _col1, 0
                         Select Operator [SEL_21] (rows=766650239 width=88)
                           Output:["_col0","_col1","_col2","_col3"]
-                          Merge Join Operator [MERGEJOIN_51] (rows=766650239 width=88)
+                          Merge Join Operator [MERGEJOIN_52] (rows=766650239 width=88)
                             Conds:RS_18._col1=RS_19._col0(Inner),Output:["_col3","_col4","_col10","_col11"]
                           <-Map 10 [SIMPLE_EDGE]
                             SHUFFLE [RS_19]
                               PartitionCols:_col0
                               Select Operator [SEL_11] (rows=462000 width=1436)
                                 Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_48] (rows=462000 width=1436)
+                                Filter Operator [FIL_49] (rows=462000 width=1436)
                                   predicate:i_item_sk is not null
                                   TableScan [TS_9] (rows=462000 width=1436)
                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_class","i_category"]
                           <-Reducer 3 [SIMPLE_EDGE]
                            SHUFFLE [RS_18]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_50] (rows=696954748 width=88)
+                              Merge Join Operator [MERGEJOIN_51] (rows=696954748 width=88)
                                 Conds:RS_15._col2=RS_16._col0(Inner),Output:["_col1","_col3","_col4"]
                               <-Map 9 [SIMPLE_EDGE]
                                 SHUFFLE [RS_16]
                                   PartitionCols:_col0
                                   Select Operator [SEL_8] (rows=852 width=1910)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_47] (rows=852 width=1910)
+                                    Filter Operator [FIL_48] (rows=852 width=1910)
                                       predicate:((s_state) IN ('SD', 'FL', 'MI', 'LA', 'MO', 'SC', 'AL', 'GA') and s_store_sk is not null)
                                       TableScan [TS_6] (rows=1704 width=1910)
                                         default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
                               <-Reducer 2 [SIMPLE_EDGE]
                                 SHUFFLE [RS_15]
                                   PartitionCols:_col2
-                                  Merge Join Operator [MERGEJOIN_49] (rows=633595212 width=88)
+                                  Merge Join Operator [MERGEJOIN_50] (rows=633595212 width=88)
                                     Conds:RS_12._col0=RS_13._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
                                   <-Map 1 [SIMPLE_EDGE]
                                     SHUFFLE [RS_12]
                                       PartitionCols:_col0
                                       Select Operator [SEL_2] (rows=575995635 width=88)
                                         Output:["_col0","_col1","_col2","_col3","_col4"]
-                                        Filter Operator [FIL_45] (rows=575995635 width=88)
+                                        Filter Operator [FIL_46] (rows=575995635 width=88)
                                           predicate:(ss_sold_date_sk is not null and ss_item_sk is not null and ss_store_sk is not null)
                                           TableScan [TS_0] (rows=575995635 width=88)
                                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_ext_sales_price","ss_net_profit"]
@@ -142,7 +142,7 @@ Stage-0
                                       PartitionCols:_col0
                                       Select Operator [SEL_5] (rows=36524 width=1119)
                                         Output:["_col0"]
-                                        Filter Operator [FIL_46] (rows=36524 width=1119)
+                                        Filter Operator [FIL_47] (rows=36524 width=1119)
                                           predicate:((d_year = 1999) and d_date_sk is not null)
                                           TableScan [TS_3] (rows=73049 width=1119)
                                             default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
diff --git a/ql/src/test/results/clientpositive/perf/query70.q.out b/ql/src/test/results/clientpositive/perf/query70.q.out
index d0900a8..e329aff 100644
--- a/ql/src/test/results/clientpositive/perf/query70.q.out
+++ b/ql/src/test/results/clientpositive/perf/query70.q.out
@@ -89,10 +89,10 @@ Reducer 7 <- Reducer 6 (SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
-    limit:100
+    limit:-1
     Stage-1
       Reducer 7
-      File Output Operator [FS_64]
+      File Output Operator [FS_65]
         Limit [LIM_63] (rows=100 width=88)
           Number of rows:100
          Select Operator [SEL_62] (rows=1149975358 width=88)
@@ -100,7 +100,7 @@ Stage-0
           <-Reducer 6 [SIMPLE_EDGE]
             SHUFFLE [RS_61]
               Select Operator [SEL_59] (rows=1149975358 width=88)
-                Output:["_col0","_col1","_col2","_col3","_col4"]
+                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                 PTF Operator [PTF_58] (rows=1149975358 width=88)
                   Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col4 DESC NULLS LAST","partition by:":"(grouping(_col5, 1) + grouping(_col5, 0)), CASE WHEN ((UDFToInteger(grouping(_col5, 0)) = 0)) THEN (_col0) ELSE (null) END"}]
                   Select Operator [SEL_57] (rows=1149975358 width=88)
@@ -119,7 +119,7 @@ Stage-0
                         Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)"],keys:_col0, _col1, 0
                         Select Operator [SEL_50] (rows=766650239 width=88)
                           Output:["_col0","_col1","_col2"]
-                          Merge Join Operator [MERGEJOIN_92] (rows=766650239 width=88)
+                          Merge Join Operator [MERGEJOIN_93] (rows=766650239 width=88)
                            Conds:RS_47._col7=RS_48._col0(Inner),Output:["_col2","_col6","_col7"]
                           <-Reducer 15 [SIMPLE_EDGE]
                             SHUFFLE [RS_48]
@@ -133,7 +133,7 @@ Stage-0
                                     Output:["_col0"],keys:_col0
                                     Select Operator [SEL_32] (rows=116159124 width=88)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_84] (rows=116159124 width=88)
+                                      Filter Operator [FIL_85] (rows=116159124 width=88)
                                         predicate:(rank_window_0 <= 5)
                                         PTF Operator [PTF_31] (rows=348477374 width=88)
                                           Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 DESC NULLS LAST","partition by:":"_col0"}]
@@ -218,7 +218,7 @@ Stage-0
                                       PartitionCols:_col0
                                       Select Operator [SEL_5] (rows=8116 width=1119)
                                         Output:["_col0"]
-                                        Filter Operator [FIL_82] (rows=8116 width=1119)
+                                        Filter Operator [FIL_83] (rows=8116 width=1119)
                                           predicate:(d_month_seq BETWEEN 1212 AND 1223 and d_date_sk is not null)
                                           TableScan [TS_3] (rows=73049 width=1119)
                                             default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
diff --git a/ql/src/test/results/clientpositive/perf/query86.q.out b/ql/src/test/results/clientpositive/perf/query86.q.out
index 6377c43..19cdd60 100644
--- a/ql/src/test/results/clientpositive/perf/query86.q.out
+++ b/ql/src/test/results/clientpositive/perf/query86.q.out
@@ -59,10 +59,10 @@ Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
-    limit:100
+    limit:-1
     Stage-1
       Reducer 6
-      File Output Operator [FS_29]
+      File Output Operator [FS_30]
         Limit [LIM_28] (rows=100 width=135)
           Number of rows:100
           Select Operator [SEL_27] (rows=261364852 width=135)
@@ -70,7 +70,7 @@ Stage-0
           <-Reducer 5 [SIMPLE_EDGE]
             SHUFFLE [RS_26]
               Select Operator [SEL_24] (rows=261364852 width=135)
-                Output:["_col0","_col1","_col2","_col3","_col4"]
+                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                 PTF Operator [PTF_23] (rows=261364852 width=135)
                   Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col4 DESC NULLS LAST","partition by:":"(grouping(_col5, 1) + grouping(_col5, 0)), CASE WHEN ((UDFToInteger(grouping(_col5, 0)) = 0)) THEN (_col0) ELSE (null) END"}]
                   Select Operator [SEL_22] (rows=261364852 width=135)
@@ -89,28 +89,28 @@ Stage-0
                         Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)"],keys:_col0, _col1, 0
                         Select Operator [SEL_15] (rows=174243235 width=135)
                           Output:["_col0","_col1","_col2"]
-                          Merge Join Operator [MERGEJOIN_39] (rows=174243235 width=135)
+                          Merge Join Operator [MERGEJOIN_40] (rows=174243235 width=135)
                            Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col2","_col6","_col7"]
                           <-Map 8 [SIMPLE_EDGE]
                             SHUFFLE [RS_13]
                               PartitionCols:_col0
                               Select Operator [SEL_8] (rows=462000 width=1436)
                                 Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_37] (rows=462000 width=1436)
+                                Filter Operator [FIL_38] (rows=462000 width=1436)
                                   predicate:i_item_sk is not null
                                   TableScan [TS_6] (rows=462000 width=1436)
                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_class","i_category"]
                           <-Reducer 2 [SIMPLE_EDGE]
                             SHUFFLE [RS_12]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_38] (rows=158402938 width=135)
+                              Merge Join Operator [MERGEJOIN_39] (rows=158402938 width=135)
                                 Conds:RS_9._col0=RS_10._col0(Inner),Output:["_col1","_col2"]
                               <-Map 1 [SIMPLE_EDGE]
                                 SHUFFLE [RS_9]
                                   PartitionCols:_col0
                                   Select Operator [SEL_2] (rows=144002668 width=135)
                                     Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_35] (rows=144002668 width=135)
+                                    Filter Operator [FIL_36] (rows=144002668 width=135)
                                       predicate:(ws_sold_date_sk is not null and ws_item_sk is not null)
                                       TableScan [TS_0] (rows=144002668 width=135)
                                         default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_net_paid"]
@@ -119,7 +119,7 @@ Stage-0
                                   PartitionCols:_col0
                                   Select Operator [SEL_5] (rows=8116 width=1119)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_36] (rows=8116 width=1119)
+                                    Filter Operator [FIL_37] (rows=8116 width=1119)
                                       predicate:(d_month_seq BETWEEN 1212 AND 1223 and d_date_sk is not null)
                                       TableScan [TS_3] (rows=73049 width=1119)
                                         default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
diff --git a/ql/src/test/results/clientpositive/perf/query89.q.out b/ql/src/test/results/clientpositive/perf/query89.q.out
index 7bc8700..9838792 100644
--- a/ql/src/test/results/clientpositive/perf/query89.q.out
+++ b/ql/src/test/results/clientpositive/perf/query89.q.out
@@ -64,10 +64,10 @@ Reducer 7 <- Reducer 6 (SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
-    limit:100
+    limit:-1
     Stage-1
      Reducer 7
-      File Output Operator [FS_36]
+      File Output Operator [FS_37]
        Limit [LIM_35] (rows=100 width=88)
          Number of rows:100
          Select Operator [SEL_34] (rows=191662559 width=88)
@@ -75,8 +75,8 @@ Stage-0
           <-Reducer 6 [SIMPLE_EDGE]
             SHUFFLE [RS_33]
               Select Operator [SEL_30] (rows=191662559 width=88)
-                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                Filter Operator [FIL_46] (rows=191662559 width=88)
+                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+                Filter Operator [FIL_47] (rows=191662559 width=88)
                   predicate:CASE WHEN ((avg_window_0 <> 0)) THEN (((abs((_col6 - avg_window_0)) / avg_window_0) > 0.1)) ELSE (null) END
                   Select Operator [SEL_29] (rows=383325119 width=88)
Output:["avg_window_0","_col0","_col1","_col2","_col3","_col4","_col5","_col6"] @@ -98,42 +98,42 @@ Stage-0 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col3)"],keys:_col5, _col6, _col7, _col10, _col12, _col13 Select Operator [SEL_21] (rows=766650239 width=88) Output:["_col5","_col6","_col7","_col10","_col12","_col13","_col3"] - Merge Join Operator [MERGEJOIN_53] (rows=766650239 width=88) + Merge Join Operator [MERGEJOIN_54] (rows=766650239 width=88) Conds:RS_18._col2=RS_19._col0(Inner),Output:["_col3","_col5","_col6","_col7","_col10","_col12","_col13"] <-Map 10 [SIMPLE_EDGE] SHUFFLE [RS_19] PartitionCols:_col0 Select Operator [SEL_11] (rows=1704 width=1910) Output:["_col0","_col1","_col2"] - Filter Operator [FIL_50] (rows=1704 width=1910) + Filter Operator [FIL_51] (rows=1704 width=1910) predicate:s_store_sk is not null TableScan [TS_9] (rows=1704 width=1910) default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_company_name"] <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_18] PartitionCols:_col2 - Merge Join Operator [MERGEJOIN_52] (rows=696954748 width=88) + Merge Join Operator [MERGEJOIN_53] (rows=696954748 width=88) Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col7","_col10"] <-Map 9 [SIMPLE_EDGE] SHUFFLE [RS_16] PartitionCols:_col0 Select Operator [SEL_8] (rows=36525 width=1119) Output:["_col0","_col2"] - Filter Operator [FIL_49] (rows=36525 width=1119) + Filter Operator [FIL_50] (rows=36525 width=1119) predicate:((d_year) IN (2000) and d_date_sk is not null) TableScan [TS_6] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_15] PartitionCols:_col0 - Merge Join Operator [MERGEJOIN_51] (rows=633595212 width=88) + Merge Join Operator [MERGEJOIN_52] (rows=633595212 width=88) Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col0","_col2","_col3","_col5","_col6","_col7"] <-Map 1 [SIMPLE_EDGE] SHUFFLE [RS_12] PartitionCols:_col1 Select Operator [SEL_2] (rows=575995635 width=88) Output:["_col0","_col1","_col2","_col3"] - Filter Operator [FIL_47] (rows=575995635 width=88) + Filter Operator [FIL_48] (rows=575995635 width=88) predicate:(ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null) TableScan [TS_0] (rows=575995635 width=88) default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_sales_price"] @@ -142,7 +142,7 @@ Stage-0 PartitionCols:_col0 Select Operator [SEL_5] (rows=231000 width=1436) Output:["_col0","_col1","_col2","_col3"] - Filter Operator [FIL_48] (rows=231000 width=1436) + Filter Operator [FIL_49] (rows=231000 width=1436) predicate:(((i_class) IN ('wallpaper', 'parenting', 'musical') or (i_class) IN ('womens', 'birdal', 'pants')) and ((i_category) IN ('Home', 'Books', 'Electronics') or (i_category) IN ('Shoes', 'Jewelry', 'Men')) and (((i_category) IN ('Home', 'Books', 'Electronics') and (i_class) IN ('wallpaper', 'parenting', 'musical')) or ((i_category) IN ('Shoes', 'Jewelry', 'Men') and (i_class) IN ('womens', 'birdal', 'pants'))) and i_item_sk is not null) TableScan [TS_3] (rows=462000 width=1436) default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand","i_class","i_category"] diff --git a/ql/src/test/results/clientpositive/spark/cbo_limit.q.out b/ql/src/test/results/clientpositive/spark/cbo_limit.q.out index c582578..85458a4 100644 --- 
a/ql/src/test/results/clientpositive/spark/cbo_limit.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_limit.q.out @@ -66,8 +66,8 @@ POSTHOOK: Input: default@cbo_t2 POSTHOOK: Input: default@cbo_t2@dt=2014 POSTHOOK: Input: default@cbo_t3 #### A masked pattern was here #### -1 12 6 1 2 6 +1 12 6 PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int, c desc limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/vector_decimal_round.q.out index 1e76ba2..cb18406 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out @@ -100,11 +100,11 @@ STAGE PLANS: alias: decimal_tbl_txt Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: dec (type: decimal(10,0)) - outputColumnNames: _col0 + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col2 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, -1) (type: decimal(11,0)) + key expressions: _col2 (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,0)) @@ -234,11 +234,11 @@ STAGE PLANS: alias: decimal_tbl_rc Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: dec (type: decimal(10,0)) - outputColumnNames: _col0 + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col2 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, -1) (type: decimal(11,0)) + key expressions: _col2 (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,0)) @@ -369,11 +369,11 @@ STAGE PLANS: alias: decimal_tbl_orc Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: dec (type: decimal(10,0)) - outputColumnNames: _col0 + expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0)) + outputColumnNames: _col0, _col2 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, -1) (type: decimal(11,0)) + key expressions: _col2 (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE 
value expressions: _col0 (type: decimal(10,0)) diff --git a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out index dd4b7d1..03db353 100644 --- a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out +++ b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out @@ -478,28 +478,20 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE Select Operator + expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month) + outputColumnNames: _col0, _col1 Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: CAST( 5-5 AS INTERVAL YEAR TO MONTH) (type: interval_year_month) - sort order: + - Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 + Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Execution mode: vectorized - Reduce Operator Tree: - Select Operator - expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 2 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Stage: Stage-0 Fetch Operator