diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java
new file mode 100644
index 0000000..bbf518d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.RexNodeConverter;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+
+public class HiveRexExecutorImpl implements RelOptPlanner.Executor {
+
+  private static final Log LOG = LogFactory.getLog(HiveRexExecutorImpl.class);
+
+  private final RelOptCluster cluster;
+
+  public HiveRexExecutorImpl(RelOptCluster cluster) {
+    this.cluster = cluster;
+  }
+
+  @Override
+  public void reduce(RexBuilder rexBuilder, List<RexNode> constExps, List<RexNode> reducedValues) {
+    RexNodeConverter rexNodeConverter = new RexNodeConverter(cluster);
+    for (RexNode rexNode : constExps) {
+      // initialize the converter
+      ExprNodeConverter converter = new ExprNodeConverter("", null, null, null,
+          new HashSet<Integer>(), cluster.getTypeFactory());
+      // convert RexNode to ExprNodeGenericFuncDesc
+      ExprNodeDesc expr = rexNode.accept(converter);
+      if (expr instanceof ExprNodeGenericFuncDesc) {
+        // fold the constant
+        ExprNodeDesc constant = ConstantPropagateProcFactory
+            .foldExpr((ExprNodeGenericFuncDesc) expr);
+        if (constant != null) {
+          try {
+            // convert the folded constant back to a RexNode
+            reducedValues.add(rexNodeConverter.convert((ExprNodeConstantDesc) constant));
+          } catch (Exception e) {
+            LOG.warn("Exception while converting folded constant back to RexNode; "
+                + "keeping original expression", e);
+            reducedValues.add(rexNode);
+          }
+        } else {
+          reducedValues.add(rexNode);
+        }
+      } else {
+        reducedValues.add(rexNode);
+      }
+    }
+  }
+
+}
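Note on wiring: constant reduction only happens once an executor has been attached to the planner, which CalcitePlanner does further below. A minimal sketch of that contract, assuming a RelOptCluster obtained from the surrounding planner setup; the class and method names here are illustrative and not part of the patch:

    import java.util.List;

    import org.apache.calcite.plan.RelOptCluster;
    import org.apache.calcite.plan.RelOptPlanner;
    import org.apache.calcite.rex.RexNode;

    import com.google.common.collect.Lists;

    import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl;

    public class ExecutorWiringSketch {
      /** Attaches the Hive executor and folds a single constant expression. */
      public static RexNode foldConstant(RelOptCluster cluster, RexNode constExp) {
        RelOptPlanner.Executor executor = new HiveRexExecutorImpl(cluster);
        cluster.getPlanner().setExecutor(executor);

        List<RexNode> reduced = Lists.newArrayList();
        // reduce() appends one result per input expression; on a conversion
        // failure the executor above falls back to the original expression.
        executor.reduce(cluster.getRexBuilder(), Lists.newArrayList(constExp), reduced);
        return reduced.get(0);
      }
    }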
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
new file mode 100644
index 0000000..17670de
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
@@ -0,0 +1,853 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptPredicateList;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.EquiJoin;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.JoinInfo;
+import org.apache.calcite.rel.logical.LogicalCalc;
+import org.apache.calcite.rel.logical.LogicalValues;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexCorrelVariable;
+import org.apache.calcite.rex.RexDynamicParam;
+import org.apache.calcite.rex.RexFieldAccess;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexLocalRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexOver;
+import org.apache.calcite.rex.RexProgram;
+import org.apache.calcite.rex.RexProgramBuilder;
+import org.apache.calcite.rex.RexRangeRef;
+import org.apache.calcite.rex.RexShuttle;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.rex.RexVisitorImpl;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlRowOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.Pair;
+import org.apache.calcite.util.Stacks;
+import org.apache.calcite.util.Util;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+/**
+ * Collection of planner rules that apply various simplifying transformations on
+ * RexNode trees. Currently, there are two transformations:
+ *
+ * <ul>
+ *
+ * <li>Constant reduction, which evaluates constant subtrees, replacing them
+ * with a corresponding RexLiteral
+ *
+ * <li>Removal of redundant casts, which occurs when the argument into the cast
+ * is the same as the type of the resulting cast expression
+ * </ul>
+ */
+public abstract class HiveReduceExpressionsRule extends RelOptRule {
+
+  // ~ Static fields/initializers ---------------------------------------------
+
+  /**
+   * Regular expression that matches the description of all instances of this
+   * rule and {@link ValuesReduceRule} also. Use it to prevent the planner from
+   * invoking these rules.
+   */
+  public static final Pattern EXCLUSION_PATTERN = Pattern
+      .compile("Reduce(Expressions|Values)Rule.*");
+
+  /**
+   * Singleton rule that reduces constants inside a
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter}.
+   * If the condition is a constant, the filter is removed (if TRUE) or replaced
+   * with an empty {@link org.apache.calcite.rel.core.Values} (if FALSE or NULL).
+   */
+  public static final HiveReduceExpressionsRule FILTER_INSTANCE = new HiveReduceExpressionsRule(
+      HiveFilter.class, "HiveReduceExpressionsRule(Filter)") {
+    public void onMatch(RelOptRuleCall call) {
+      final HiveFilter filter = call.rel(0);
+      final List<RexNode> expList = Lists.newArrayList(filter.getCondition());
+      RexNode newConditionExp;
+      boolean reduced;
+      final RelOptPredicateList predicates = RelMetadataQuery.getPulledUpPredicates(filter
+          .getInput());
+      if (reduceExpressions(filter, expList, predicates)) {
+        assert expList.size() == 1;
+        newConditionExp = expList.get(0);
+        reduced = true;
+      } else {
+        // No reduction, but let's still test the original
+        // predicate to see if it was already a constant,
+        // in which case we don't need any runtime decision
+        // about filtering.
+        newConditionExp = filter.getCondition();
+        reduced = false;
+      }
+      if (newConditionExp.isAlwaysTrue()) {
+        call.transformTo(filter.getInput());
+      } else if (newConditionExp instanceof RexLiteral
+          || RexUtil.isNullLiteral(newConditionExp, true)) {
+        //TODO: similar to null scan optimization
+        //call.transformTo(LogicalValues.createEmpty(filter.getCluster(), filter.getRowType()));
+        return;
+      } else if (reduced) {
+        call.transformTo(RelOptUtil.createFilter(filter.getInput(), expList.get(0)));
+      } else {
+        if (newConditionExp instanceof RexCall) {
+          RexCall rexCall = (RexCall) newConditionExp;
+          boolean reverse = rexCall.getOperator() == SqlStdOperatorTable.NOT;
+          if (reverse) {
+            rexCall = (RexCall) rexCall.getOperands().get(0);
+          }
+          reduceNotNullableFilter(call, filter, rexCall, reverse);
+        }
+        return;
+      }
+
+      // New plan is absolutely better than old plan.
+      call.getPlanner().setImportance(filter, 0.0);
+    }
+
+    private void reduceNotNullableFilter(RelOptRuleCall call, HiveFilter filter, RexCall rexCall,
+        boolean reverse) {
+      // If the expression is a IS [NOT] NULL on a non-nullable
+      // column, then we can either remove the filter or replace
+      // it with an Empty.
+      boolean alwaysTrue;
+      switch (rexCall.getKind()) {
+      case IS_NULL:
+      case IS_UNKNOWN:
+        alwaysTrue = false;
+        break;
+      case IS_NOT_NULL:
+        alwaysTrue = true;
+        break;
+      default:
+        return;
+      }
+      if (reverse) {
+        alwaysTrue = !alwaysTrue;
+      }
+      RexNode operand = rexCall.getOperands().get(0);
+      if (operand instanceof RexInputRef) {
+        RexInputRef inputRef = (RexInputRef) operand;
+        if (!inputRef.getType().isNullable()) {
+          if (alwaysTrue) {
+            call.transformTo(filter.getInput());
+          } else {
+            call.transformTo(LogicalValues.createEmpty(filter.getCluster(), filter.getRowType()));
+          }
+        }
+      }
+    }
+  };
+
+  public static final HiveReduceExpressionsRule PROJECT_INSTANCE = new HiveReduceExpressionsRule(
+      HiveProject.class, "HiveReduceExpressionsRule(Project)") {
+
+    public boolean matches(RelOptRuleCall call) {
+      HiveProject project = call.rel(0);
+      HiveRulesRegistry registry = call.getPlanner().
+              getContext().unwrap(HiveRulesRegistry.class);
+
+      // If this operator has been visited already by the rule,
+      // we do not need to apply the optimization
+      if (registry != null && registry.getVisited(this).contains(project)) {
+        return false;
+      }
+
+      return true;
+    }
+
+    public void onMatch(RelOptRuleCall call) {
+      HiveProject project = call.rel(0);
+      // 0. Register that we have visited this operator in this rule
+      HiveRulesRegistry registry = call.getPlanner().
+              getContext().unwrap(HiveRulesRegistry.class);
+      if (registry != null) {
+        registry.registerVisited(this, project);
+      }
+      final RelOptPredicateList predicates = RelMetadataQuery.getPulledUpPredicates(project
+          .getInput());
+      final List<RexNode> expList = Lists.newArrayList(project.getProjects());
+      if (reduceExpressions(project, expList, predicates)) {
+        HiveProject newProject = HiveProject.create(project.getCluster(), project.getInput(), expList,
+            project.getRowType(), project.getCollationList());
+        if (registry != null) {
+          registry.registerVisited(this, newProject);
+        }
+        call.transformTo(newProject);
+        // New plan is absolutely better than old plan.
+        call.getPlanner().setImportance(project, 0.0);
+      }
+    }
+  };
+
+  public static final HiveReduceExpressionsRule JOIN_INSTANCE = new HiveReduceExpressionsRule(
+      Join.class, "HiveReduceExpressionsRule(Join)") {
+    public void onMatch(RelOptRuleCall call) {
+      final Join join = call.rel(0);
+      final List<RexNode> expList = Lists.newArrayList(join.getCondition());
+      final int fieldCount = join.getLeft().getRowType().getFieldCount();
+      final RelOptPredicateList leftPredicates = RelMetadataQuery.getPulledUpPredicates(join
+          .getLeft());
+      final RelOptPredicateList rightPredicates = RelMetadataQuery.getPulledUpPredicates(join
+          .getRight());
+      final RelOptPredicateList predicates = leftPredicates
+          .union(rightPredicates.shift(fieldCount));
+      if (!reduceExpressions(join, expList, predicates)) {
+        return;
+      }
+      if (join instanceof EquiJoin) {
+        final JoinInfo joinInfo = JoinInfo.of(join.getLeft(), join.getRight(), expList.get(0));
+        if (!joinInfo.isEqui()) {
+          // This kind of join must be an equi-join, and the condition is
+          // no longer an equi-join. SemiJoin is an example of this.
+          return;
+        }
+      }
+      call.transformTo(join.copy(join.getTraitSet(), expList.get(0), join.getLeft(),
+          join.getRight(), join.getJoinType(), join.isSemiJoinDone()));
+
+      // New plan is absolutely better than old plan.
+      call.getPlanner().setImportance(join, 0.0);
+    }
+  };
+
+  public static final HiveReduceExpressionsRule CALC_INSTANCE = new HiveReduceExpressionsRule(
+      LogicalCalc.class, "HiveReduceExpressionsRule(Calc)") {
+    public void onMatch(RelOptRuleCall call) {
+      LogicalCalc calc = call.rel(0);
+      RexProgram program = calc.getProgram();
+      final List<RexNode> exprList = program.getExprList();
+
+      // Form a list of expressions with sub-expressions fully expanded.
+      final List<RexNode> expandedExprList = Lists.newArrayList();
+      final RexShuttle shuttle = new RexShuttle() {
+        public RexNode visitLocalRef(RexLocalRef localRef) {
+          return expandedExprList.get(localRef.getIndex());
+        }
+      };
+      for (RexNode expr : exprList) {
+        expandedExprList.add(expr.accept(shuttle));
+      }
+      final RelOptPredicateList predicates = RelOptPredicateList.EMPTY;
+      if (reduceExpressions(calc, expandedExprList, predicates)) {
+        final RexProgramBuilder builder = new RexProgramBuilder(calc.getInput().getRowType(), calc
+            .getCluster().getRexBuilder());
+        final List<RexLocalRef> list = Lists.newArrayList();
+        for (RexNode expr : expandedExprList) {
+          list.add(builder.registerInput(expr));
+        }
+        if (program.getCondition() != null) {
+          final int conditionIndex = program.getCondition().getIndex();
+          final RexNode newConditionExp = expandedExprList.get(conditionIndex);
+          if (newConditionExp.isAlwaysTrue()) {
+            // condition is always TRUE - drop it
+          } else if (newConditionExp instanceof RexLiteral
+              || RexUtil.isNullLiteral(newConditionExp, true)) {
+            // condition is always NULL or FALSE - replace calc
+            // with empty
+            call.transformTo(LogicalValues.createEmpty(calc.getCluster(), calc.getRowType()));
+            return;
+          } else {
+            builder.addCondition(list.get(conditionIndex));
+          }
+        }
+        int k = 0;
+        for (RexLocalRef projectExpr : program.getProjectList()) {
+          final int index = projectExpr.getIndex();
+          builder.addProject(list.get(index).getIndex(), program.getOutputRowType().getFieldNames()
+              .get(k++));
+        }
+        call.transformTo(LogicalCalc.create(calc.getInput(), builder.getProgram()));
+
+        // New plan is absolutely better than old plan.
+        call.getPlanner().setImportance(calc, 0.0);
+      }
+    }
+  };
+
+  // ~ Constructors -----------------------------------------------------------
+
+  /**
+   * Creates a HiveReduceExpressionsRule.
+   *
+   * @param clazz
+   *          class of rels to which this rule should apply
+   */
+  private HiveReduceExpressionsRule(Class<? extends RelNode> clazz, String desc) {
+    super(operand(clazz, any()), desc);
+  }
+
+  // ~ Methods ----------------------------------------------------------------
+
+  /**
+   * Reduces a list of expressions.
+   *
+   * @param rel
+   *          Relational expression
+   * @param expList
+   *          List of expressions, modified in place
+   * @param predicates
+   *          Constraints known to hold on input expressions
+   * @return whether reduction found something to change, and succeeded
+   */
+  static boolean reduceExpressions(RelNode rel, List<RexNode> expList,
+      RelOptPredicateList predicates) {
+    RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
+
+    // Replace predicates on CASE to CASE on predicates.
+    for (int i = 0; i < expList.size(); i++) {
+      RexNode exp = expList.get(i);
+      if (exp instanceof RexCall) {
+        RexNode exp2 = pushPredicateIntoCase((RexCall) exp);
+        if (exp2 != exp) {
+          expList.set(i, exp2);
+        }
+      }
+    }
+
+    // Find reducible expressions.
+    final List<RexNode> constExps = Lists.newArrayList();
+    List<Boolean> addCasts = Lists.newArrayList();
+    final List<RexNode> removableCasts = Lists.newArrayList();
+    final ImmutableMap<RexNode, RexLiteral> constants = predicateConstants(predicates);
+    findReducibleExps(rel.getCluster().getTypeFactory(), expList, constants, constExps, addCasts,
+        removableCasts);
+    if (constExps.isEmpty() && removableCasts.isEmpty()) {
+      return false;
+    }
+
+    // Remove redundant casts before reducing constant expressions.
+    // If the argument to the redundant cast is a reducible constant,
+    // reducing that argument to a constant first will result in not being
+    // able to locate the original cast expression.
+    if (!removableCasts.isEmpty()) {
+      final List<RexNode> reducedExprs = Lists.newArrayList();
+      for (RexNode exp : removableCasts) {
+        RexCall call = (RexCall) exp;
+        reducedExprs.add(call.getOperands().get(0));
+      }
+      RexReplacer replacer = new RexReplacer(rexBuilder, removableCasts, reducedExprs,
+          Collections.nCopies(removableCasts.size(), false));
+      replacer.mutate(expList);
+    }
+
+    if (constExps.isEmpty()) {
+      return true;
+    }
+
+    final List<RexNode> constExps2 = Lists.newArrayList(constExps);
+    if (!constants.isEmpty()) {
+      // noinspection unchecked
+      final List<Map.Entry<RexNode, RexNode>> pairs = (List<Map.Entry<RexNode, RexNode>>) (List) Lists
+          .newArrayList(constants.entrySet());
+      RexReplacer replacer = new RexReplacer(rexBuilder, Pair.left(pairs), Pair.right(pairs),
+          Collections.nCopies(pairs.size(), false));
+      replacer.mutate(constExps2);
+    }
+
+    // Compute the values they reduce to.
+    RelOptPlanner.Executor executor = rel.getCluster().getPlanner().getExecutor();
+    if (executor == null) {
+      // Cannot reduce expressions: caller has not set an executor in their
+      // environment. Caller should execute something like the following before
+      // invoking the planner:
+      //
+      // final RexExecutorImpl executor =
+      //   new RexExecutorImpl(Schemas.createDataContext(null));
+      // rootRel.getCluster().getPlanner().setExecutor(executor);
+      return false;
+    }
+
+    final List<RexNode> reducedValues = Lists.newArrayList();
+    executor.reduce(rexBuilder, constExps2, reducedValues);
+
+    // For Project, we have to be sure to preserve the result
+    // types, so always cast regardless of the expression type.
+    // For other RelNodes like Filter, in general, this isn't necessary,
+    // and the presence of casts could hinder other rules such as sarg
+    // analysis, which require bare literals. But there are special cases,
+    // like when the expression is a UDR argument, that need to be
+    // handled as special cases.
+    if (rel instanceof HiveProject) {
+      addCasts = Collections.nCopies(reducedValues.size(), true);
+    }
+
+    RexReplacer replacer = new RexReplacer(rexBuilder, constExps, reducedValues, addCasts);
+    replacer.mutate(expList);
+    return true;
+  }
+
+  /**
+   * Locates expressions that can be reduced to literals or converted to
+   * expressions with redundant casts removed.
+   *
+   * @param typeFactory
+   *          Type factory
+   * @param exps
+   *          list of candidate expressions to be examined for reduction
+   * @param constants
+   *          List of expressions known to be constant
+   * @param constExps
+   *          returns the list of expressions that can be constant reduced
+   * @param addCasts
+   *          indicator for each expression that can be constant reduced,
+   *          whether a cast of the resulting reduced expression is potentially
+   *          necessary
+   * @param removableCasts
+   *          returns the list of cast expressions where the cast can be
+   *          removed
+   */
+  private static void findReducibleExps(RelDataTypeFactory typeFactory, List<RexNode> exps,
+      ImmutableMap<RexNode, RexLiteral> constants, List<RexNode> constExps, List<Boolean> addCasts,
+      List<RexNode> removableCasts) {
+    ReducibleExprLocator gardener = new ReducibleExprLocator(typeFactory, constants, constExps,
+        addCasts, removableCasts);
+    for (RexNode exp : exps) {
+      gardener.analyze(exp);
+    }
+    assert constExps.size() == addCasts.size();
+  }
+
+  private static ImmutableMap<RexNode, RexLiteral> predicateConstants(
+      RelOptPredicateList predicates) {
+    // We cannot use an ImmutableMap.Builder here. If there are multiple entries
+    // with the same key (e.g. "WHERE deptno = 1 AND deptno = 2"), it doesn't
+    // matter which we take, so the latter will replace the former.
+    // The basic idea is to find all the pairs of RexNode = RexLiteral
+    // (1) If 'predicates' contain a non-EQUALS, we bail out.
+    // (2) It is OK if a RexNode is equal to the same RexLiteral several times,
+    // (e.g. "WHERE deptno = 1 AND deptno = 1")
+    // (3) It will return an empty map if there are inconsistent constraints
+    // (e.g. "WHERE deptno = 1 AND deptno = 2")
+    Map<RexNode, RexLiteral> builder = Maps.newHashMap();
+    boolean findUsefulConstants = true;
+    for (RexNode predicate : predicates.pulledUpPredicates) {
+      if (predicate.getKind().equals(SqlKind.EQUALS)) {
+        final List<RexNode> operands = ((RexCall) predicate).getOperands();
+        if (operands.size() != 2) {
+          findUsefulConstants = false;
+          break;
+        } else {
+          if (operands.get(1) instanceof RexLiteral) {
+            RexLiteral literal = builder.get(operands.get(0));
+            if (literal == null) {
+              builder.put(operands.get(0), (RexLiteral) operands.get(1));
+            } else {
+              RexLiteral newLiteral = (RexLiteral) operands.get(1);
+              if (!literal.getValue().equals(newLiteral.getValue())) {
+                // Inconsistent constraints: bail out; the filter expression
+                // reduction should be able to deal with this.
+                findUsefulConstants = false;
+                break;
+              }
+            }
+          } else if (operands.get(0) instanceof RexLiteral) {
+            RexLiteral literal = builder.get(operands.get(1));
+            if (literal == null) {
+              builder.put(operands.get(1), (RexLiteral) operands.get(0));
+            } else {
+              RexLiteral newLiteral = (RexLiteral) operands.get(0);
+              if (!literal.getValue().equals(newLiteral.getValue())) {
+                // Inconsistent constraints: bail out; the filter expression
+                // reduction should be able to deal with this.
+                findUsefulConstants = false;
+                break;
+              }
+            }
+          } else {
+            findUsefulConstants = false;
+            break;
+          }
+        }
+      } else {
+        // Non-EQUALS predicate: bail out.
+        findUsefulConstants = false;
+        break;
+      }
+    }
+    if (!findUsefulConstants) {
+      builder = Maps.newHashMap();
+    }
+    return ImmutableMap.copyOf(builder);
+  }
+
+  private static RexCall pushPredicateIntoCase(RexCall call) {
+    if (call.getType().getSqlTypeName() != SqlTypeName.BOOLEAN) {
+      return call;
+    }
+    int caseOrdinal = -1;
+    final List<RexNode> operands = call.getOperands();
+    for (int i = 0; i < operands.size(); i++) {
+      RexNode operand = operands.get(i);
+      switch (operand.getKind()) {
+      case CASE:
+        caseOrdinal = i;
+      }
+    }
+    if (caseOrdinal < 0) {
+      return call;
+    }
+    // Convert
+    //   f(CASE WHEN p1 THEN v1 ... END, arg)
+    // to
+    //   CASE WHEN p1 THEN f(v1, arg) ... END
+    final RexCall case_ = (RexCall) operands.get(caseOrdinal);
+    final List<RexNode> nodes = new ArrayList<>();
+    for (int i = 0; i < case_.getOperands().size(); i++) {
+      RexNode node = case_.getOperands().get(i);
+      if (!RexUtil.isCasePredicate(case_, i)) {
+        node = substitute(call, caseOrdinal, node);
+      }
+      nodes.add(node);
+    }
+    return case_.clone(call.getType(), nodes);
+  }
+
+  /**
+   * Converts op(arg0, ..., argOrdinal, ..., argN) to op(arg0, ..., node, ...,
+   * argN).
+   */
+  private static RexNode substitute(RexCall call, int ordinal, RexNode node) {
+    final List<RexNode> newOperands = Lists.newArrayList(call.getOperands());
+    newOperands.set(ordinal, node);
+    return call.clone(call.getType(), newOperands);
+  }
+
+  // ~ Inner Classes ----------------------------------------------------------
+
+  /**
+   * Replaces expressions with their reductions. Note that we only have to look
+   * for RexCall, since nothing else is reducible in the first place.
+   */
+  private static class RexReplacer extends RexShuttle {
+    private final RexBuilder rexBuilder;
+    private final List<RexNode> reducibleExps;
+    private final List<RexNode> reducedValues;
+    private final List<Boolean> addCasts;
+
+    RexReplacer(RexBuilder rexBuilder, List<RexNode> reducibleExps, List<RexNode> reducedValues,
+        List<Boolean> addCasts) {
+      this.rexBuilder = rexBuilder;
+      this.reducibleExps = reducibleExps;
+      this.reducedValues = reducedValues;
+      this.addCasts = addCasts;
+    }
+
+    @Override
+    public RexNode visitInputRef(RexInputRef inputRef) {
+      RexNode node = visit(inputRef);
+      if (node == null) {
+        return super.visitInputRef(inputRef);
+      }
+      return node;
+    }
+
+    @Override
+    public RexNode visitCall(RexCall call) {
+      RexNode node = visit(call);
+      if (node != null) {
+        return node;
+      }
+      node = super.visitCall(call);
+      if (node != call) {
+        node = RexUtil.simplify(rexBuilder, node);
+      }
+      return node;
+    }
+
+    private RexNode visit(final RexNode call) {
+      int i = reducibleExps.indexOf(call);
+      if (i == -1) {
+        return null;
+      }
+      RexNode replacement = reducedValues.get(i);
+      if (addCasts.get(i) && (replacement.getType() != call.getType())) {
+        // Handle change from nullable to NOT NULL by claiming
+        // that the result is still nullable, even though
+        // we know it isn't.
+        //
+        // Also, we cannot reduce CAST('abc' AS VARCHAR(4)) to 'abc'.
+        // If we make 'abc' of type VARCHAR(4), we may later encounter
+        // the same expression in a Project's digest where it has
+        // type VARCHAR(3), and that's wrong.
+        replacement = rexBuilder.makeAbstractCast(call.getType(), replacement);
+      }
+      return replacement;
+    }
+  }
+
+  /**
+   * Helper class used to locate expressions that either can be reduced to
+   * literals or contain redundant casts.
+   */
+  private static class ReducibleExprLocator extends RexVisitorImpl<Void> {
+    /**
+     * Whether an expression is constant, and if so, whether it can be reduced
+     * to a simpler constant.
+     */
+    enum Constancy {
+      NON_CONSTANT, REDUCIBLE_CONSTANT, IRREDUCIBLE_CONSTANT
+    }
+
+    private final RelDataTypeFactory typeFactory;
+
+    private final List<Constancy> stack;
+
+    private final ImmutableMap<RexNode, RexLiteral> constants;
+
+    private final List<RexNode> constExprs;
+
+    private final List<Boolean> addCasts;
+
+    private final List<RexNode> removableCasts;
+
+    private final List<SqlOperator> parentCallTypeStack;
+
+    ReducibleExprLocator(RelDataTypeFactory typeFactory,
+        ImmutableMap<RexNode, RexLiteral> constants, List<RexNode> constExprs,
+        List<Boolean> addCasts, List<RexNode> removableCasts) {
+      // go deep
+      super(true);
+      this.typeFactory = typeFactory;
+      this.constants = constants;
+      this.constExprs = constExprs;
+      this.addCasts = addCasts;
+      this.removableCasts = removableCasts;
+      this.stack = Lists.newArrayList();
+      this.parentCallTypeStack = Lists.newArrayList();
+    }
+
+    public void analyze(RexNode exp) {
+      assert stack.isEmpty();
+
+      exp.accept(this);
+
+      // Deal with top of stack
+      assert stack.size() == 1;
+      assert parentCallTypeStack.isEmpty();
+      Constancy rootConstancy = stack.get(0);
+      if (rootConstancy == Constancy.REDUCIBLE_CONSTANT) {
+        // The entire subtree was constant, so add it to the result.
+        addResult(exp);
+      }
+      stack.clear();
+    }
+
+    private Void pushVariable() {
+      stack.add(Constancy.NON_CONSTANT);
+      return null;
+    }
+
+    private void addResult(RexNode exp) {
+      // Cast of literal can't be reduced, so skip those (otherwise we'd
+      // go into an infinite loop as we add them back).
+      if (exp.getKind() == SqlKind.CAST) {
+        RexCall cast = (RexCall) exp;
+        RexNode operand = cast.getOperands().get(0);
+        if (operand instanceof RexLiteral) {
+          return;
+        }
+      }
+      constExprs.add(exp);
+
+      // In the case where the expression corresponds to a UDR argument,
+      // we need to preserve casts. Note that this only applies to
+      // the topmost argument, not expressions nested within the UDR
+      // call.
+      //
+      // REVIEW zfong 6/13/08 - Are there other expressions where we
+      // also need to preserve casts?
+      if (parentCallTypeStack.isEmpty()) {
+        addCasts.add(false);
+      } else {
+        addCasts.add(isUdf(Stacks.peek(parentCallTypeStack)));
+      }
+    }
+
+    private Boolean isUdf(SqlOperator operator) {
+      // return operator instanceof UserDefinedRoutine
+      return false;
+    }
+
+    public Void visitInputRef(RexInputRef inputRef) {
+      if (constants.containsKey(inputRef)) {
+        stack.add(Constancy.REDUCIBLE_CONSTANT);
+        return null;
+      }
+      return pushVariable();
+    }
+
+    public Void visitLiteral(RexLiteral literal) {
+      stack.add(Constancy.IRREDUCIBLE_CONSTANT);
+      return null;
+    }
+
+    public Void visitOver(RexOver over) {
+      // assume non-constant (running SUM(1) looks constant but isn't)
+      analyzeCall(over, Constancy.NON_CONSTANT);
+      return null;
+    }
+
+    public Void visitCorrelVariable(RexCorrelVariable correlVariable) {
+      return pushVariable();
+    }
+
+    public Void visitCall(RexCall call) {
+      // assume REDUCIBLE_CONSTANT until proven otherwise
+      analyzeCall(call, Constancy.REDUCIBLE_CONSTANT);
+      return null;
+    }
+
+    private void analyzeCall(RexCall call, Constancy callConstancy) {
+      Stacks.push(parentCallTypeStack, call.getOperator());
+
+      // visit operands, pushing their states onto stack
+      super.visitCall(call);
+
+      // look for NON_CONSTANT operands
+      int operandCount = call.getOperands().size();
+      List<Constancy> operandStack = Util.last(stack, operandCount);
+      for (Constancy operandConstancy : operandStack) {
+        if (operandConstancy == Constancy.NON_CONSTANT) {
+          callConstancy = Constancy.NON_CONSTANT;
+        }
+      }
+
+      // Even if all operands are constant, the call itself may
+      // be non-deterministic.
+      if (!call.getOperator().isDeterministic()) {
+        callConstancy = Constancy.NON_CONSTANT;
+      } else if (call.getOperator().isDynamicFunction()) {
+        // We can reduce the call to a constant, but we can't
+        // cache the plan if the function is dynamic.
+        // For now, treat it same as non-deterministic.
+        callConstancy = Constancy.NON_CONSTANT;
+      }
+
+      // Row operator itself can't be reduced to a literal, but if
+      // the operands are constants, we still want to reduce those
+      if ((callConstancy == Constancy.REDUCIBLE_CONSTANT)
+          && (call.getOperator() instanceof SqlRowOperator)) {
+        callConstancy = Constancy.NON_CONSTANT;
+      }
+
+      if (callConstancy == Constancy.NON_CONSTANT) {
+        // any REDUCIBLE_CONSTANT children are now known to be maximal
+        // reducible subtrees, so they can be added to the result
+        // list
+        for (int iOperand = 0; iOperand < operandCount; ++iOperand) {
+          Constancy constancy = operandStack.get(iOperand);
+          if (constancy == Constancy.REDUCIBLE_CONSTANT) {
+            addResult(call.getOperands().get(iOperand));
+          }
+        }
+
+        // if this cast expression can't be reduced to a literal,
+        // then see if we can remove the cast
+        if (call.getOperator() == SqlStdOperatorTable.CAST) {
+          reduceCasts(call);
+        }
+      }
+
+      // pop operands off of the stack
+      operandStack.clear();
+
+      // pop this parent call operator off the stack
+      Stacks.pop(parentCallTypeStack, call.getOperator());
+
+      // push constancy result for this call onto stack
+      stack.add(callConstancy);
+    }
+
+    private void reduceCasts(RexCall outerCast) {
+      List<RexNode> operands = outerCast.getOperands();
+      if (operands.size() != 1) {
+        return;
+      }
+      RelDataType outerCastType = outerCast.getType();
+      RelDataType operandType = operands.get(0).getType();
+      if (operandType.equals(outerCastType)) {
+        removableCasts.add(outerCast);
+        return;
+      }
+
+      // See if the reduction
+      //   CAST((CAST x AS type) AS type NOT NULL)
+      //   -> CAST(x AS type NOT NULL)
+      // applies. TODO jvs 15-Dec-2008: consider
+      // similar cases for precision changes.
+      if (!(operands.get(0) instanceof RexCall)) {
+        return;
+      }
+      RexCall innerCast = (RexCall) operands.get(0);
+      if (innerCast.getOperator() != SqlStdOperatorTable.CAST) {
+        return;
+      }
+      if (innerCast.getOperands().size() != 1) {
+        return;
+      }
+      RelDataType outerTypeNullable = typeFactory.createTypeWithNullability(outerCastType, true);
+      RelDataType innerTypeNullable = typeFactory.createTypeWithNullability(operandType, true);
+      if (outerTypeNullable != innerTypeNullable) {
+        return;
+      }
+      if (operandType.isNullable()) {
+        removableCasts.add(innerCast);
+      }
+    }
+
+    public Void visitDynamicParam(RexDynamicParam dynamicParam) {
+      return pushVariable();
+    }
+
+    public Void visitRangeRef(RexRangeRef rangeRef) {
+      return pushVariable();
+    }
+
+    public Void visitFieldAccess(RexFieldAccess fieldAccess) {
+      return pushVariable();
+    }
+  }
+
+}
+
+// End HiveReduceExpressionsRule.java
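The predicateConstants() bail-out rules above are easiest to see on a toy model. The following self-contained sketch mirrors the same logic on plain strings; it is purely illustrative, and none of these names appear in the patch:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class PredicateConstantsDemo {

      /** Each String[]{column, literal} models an EQUALS predicate "column = literal". */
      static Map<String, String> constants(List<String[]> equalsPredicates) {
        Map<String, String> builder = new HashMap<>();
        for (String[] eq : equalsPredicates) {
          String previous = builder.get(eq[0]);
          if (previous == null) {
            builder.put(eq[0], eq[1]);
          } else if (!previous.equals(eq[1])) {
            // Inconsistent constraints ("deptno = 1 AND deptno = 2"):
            // bail out and let filter reduction deal with it.
            return Collections.emptyMap();
          }
        }
        return builder;
      }

      public static void main(String[] args) {
        // "WHERE deptno = 1 AND deptno = 1" keeps the constant: {deptno=1}
        System.out.println(constants(Arrays.asList(
            new String[]{"deptno", "1"}, new String[]{"deptno", "1"})));
        // "WHERE deptno = 1 AND deptno = 2" yields no constants: {}
        System.out.println(constants(Arrays.asList(
            new String[]{"deptno", "1"}, new String[]{"deptno", "2"})));
      }
    }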
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index 1f5d919..1428ab1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -150,8 +150,9 @@ static ASTNode literal(RexLiteral literal, boolean useTypeQualInLiteral) {
     switch (sqlType) {
     case BINARY:
       ByteString bs = (ByteString) literal.getValue();
-      val = bs.byteAt(0);
-      type = HiveParser.BigintLiteral;
+      val = bs.toString();
+      val = "'" + val + "'";
+      type = HiveParser.StringLiteral;
       break;
     case TINYINT:
       if (useTypeQualInLiteral) {
@@ -217,7 +218,7 @@ static ASTNode literal(RexLiteral literal, boolean useTypeQualInLiteral) {
     case TIMESTAMP: {
       val = literal.getValue();
       type = HiveParser.TOK_TIMESTAMPLITERAL;
-      DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+      DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
       val = df.format(((Calendar) val).getTime());
       val = "'" + val + "'";
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index de8ab80..94024d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -547,7 +547,7 @@ public ASTNode visitCall(RexCall call) {
       SqlOperator op = call.getOperator();
       List<ASTNode> astNodeLst = new LinkedList<ASTNode>();
       if (op.kind == SqlKind.CAST) {
-        HiveToken ht = TypeConverter.hiveToken(call.getType());
+        HiveToken ht = TypeConverter.hiveToken(call);
         ASTBuilder astBldr = ASTBuilder.construct(ht.type, ht.text);
         if (ht.args != null) {
           for (String castArg : ht.args)
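The SimpleDateFormat change in the ASTBuilder hunk above matters because the old pattern silently dropped sub-second precision when a folded timestamp constant was printed back into the AST. A small self-contained illustration (the demo class name is ours):

    import java.text.DateFormat;
    import java.text.SimpleDateFormat;
    import java.util.Calendar;

    public class TimestampFormatDemo {
      public static void main(String[] args) {
        Calendar cal = Calendar.getInstance();
        cal.set(2015, Calendar.JANUARY, 1, 12, 34, 56);
        cal.set(Calendar.MILLISECOND, 789);

        DateFormat oldFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        DateFormat newFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");

        System.out.println(oldFormat.format(cal.getTime())); // 2015-01-01 12:34:56
        System.out.println(newFormat.format(cal.getTime())); // 2015-01-01 12:34:56.789
      }
    }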
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index 631a4ca..09c2bdf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -29,6 +29,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.avatica.util.ByteString;
 import org.apache.calcite.avatica.util.TimeUnit;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.rel.RelNode;
@@ -112,6 +113,10 @@ private InputCtx(RelDataType calciteInpDataType, ImmutableMap<String, Integer> h
   private final ImmutableList<InputCtx> inputCtxs;
   private final boolean flattenExpr;
 
+  public RexNodeConverter(RelOptCluster cluster) {
+    this(cluster, new ArrayList<InputCtx>(), false);
+  }
+
   public RexNodeConverter(RelOptCluster cluster, RelDataType inpDataType,
       ImmutableMap<String, Integer> nameToPosMap, int offset, boolean flattenExpr) {
     this.cluster = cluster;
@@ -321,6 +326,10 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx
         coi);
     RexNode calciteLiteral = null;
 
+    if (value == null) {
+      return cluster.getRexBuilder().makeLiteral(null,
+          cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true);
+    }
     // TODO: Verify if we need to use ConstantObjectInspector to unwrap data
     switch (hiveTypeCategory) {
     case BOOLEAN:
@@ -430,6 +439,8 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx
           cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true);
       break;
     case BINARY:
+      calciteLiteral = cluster.getRexBuilder().makeBinaryLiteral(new ByteString((byte[]) value));
+      break;
     case UNKNOWN:
     default:
       throw new RuntimeException("UnSupported Literal");
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
index 2825f77..eeeedf8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
@@ -28,6 +28,7 @@
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.SqlIntervalQualifier;
 import org.apache.calcite.sql.type.SqlTypeName;
@@ -321,9 +322,9 @@ public static TypeInfo convertPrimitiveType(RelDataType rType) {
   }
 
   /*********************** Convert Calcite Types To Hive Types ***********************/
-  public static HiveToken hiveToken(RelDataType calciteType) {
+  public static HiveToken hiveToken(RexCall call) {
     HiveToken ht = null;
-
+    RelDataType calciteType = call.getType();
     switch (calciteType.getSqlTypeName()) {
     case CHAR: {
       ht = new HiveToken(HiveParser.TOK_CHAR, "TOK_CHAR", String.valueOf(calciteType.getPrecision()));
@@ -343,6 +344,14 @@ public static HiveToken hiveToken(RelDataType calciteType) {
           .getPrecision()), String.valueOf(calciteType.getScale()));
     }
       break;
+    case BINARY: {
+      if (call.getOperands().get(0).getType().getSqlTypeName().getName().equals("BINARY")) {
+        ht = new HiveToken(HiveParser.Identifier, "unhex");
+      } else {
+        ht = new HiveToken(HiveParser.Identifier, "binary");
+      }
+    }
+      break;
     default:
       ht = calciteToHiveTypeNameMap.get(calciteType.getSqlTypeName().getName());
     }
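The BINARY changes above hinge on Avatica's ByteString rendering its bytes as hex digits: a folded binary constant therefore comes back as a hex string literal, which appears to be why TypeConverter routes a cast over a BINARY operand through unhex. A sketch of that rendering, assuming calcite-avatica on the classpath (the demo class is ours):

    import org.apache.calcite.avatica.util.ByteString;

    public class BinaryLiteralDemo {
      public static void main(String[] args) {
        byte[] bytes = {(byte) 0xde, (byte) 0xad, (byte) 0xbe, (byte) 0xef};
        ByteString bs = new ByteString(bytes);
        // Prints the hex form, matching what ASTBuilder wraps in quotes
        // as a StringLiteral: deadbeef
        System.out.println(bs.toString());
      }
    }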
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index d029f4b..1c8f602 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -38,6 +38,7 @@
 import org.antlr.runtime.tree.TreeVisitorAction;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptPlanner.Executor;
 import org.apache.calcite.plan.RelOptQuery;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptSchema;
@@ -69,7 +70,6 @@
 import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
 import org.apache.calcite.rel.rules.ProjectMergeRule;
 import org.apache.calcite.rel.rules.ProjectRemoveRule;
-import org.apache.calcite.rel.rules.ReduceExpressionsRule;
 import org.apache.calcite.rel.rules.SemiJoinFilterTransposeRule;
 import org.apache.calcite.rel.rules.SemiJoinJoinTransposeRule;
 import org.apache.calcite.rel.rules.SemiJoinProjectTransposeRule;
@@ -119,6 +119,7 @@
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveDefaultRelMetadataProvider;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveHepPlannerContext;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveVolcanoPlannerContext;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
@@ -150,6 +151,7 @@
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePartitionPruneRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePreFilteringRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectMergeRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRelFieldTrimmer;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveWindowingFixRule;
@@ -844,10 +846,12 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
 
       // Create MD provider
       HiveDefaultRelMetadataProvider mdProvider = new HiveDefaultRelMetadataProvider(conf);
+      // Create executor
+      Executor executorProvider = new HiveRexExecutorImpl(cluster);
 
       // 2. Apply Pre Join Order optimizations
       calcitePreCboPlan = applyPreJoinOrderingTransforms(calciteGenPlan,
-          mdProvider.getMetadataProvider());
+          mdProvider.getMetadataProvider(), executorProvider);
 
       // 3. Appy Join Order Optimizations using Hep Planner (MST Algorithm)
       List<RelMetadataProvider> list = Lists.newArrayList();
@@ -861,9 +865,6 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
       hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveJoin.HIVE_JOIN_FACTORY,
           HiveProject.DEFAULT_PROJECT_FACTORY, HiveFilter.DEFAULT_FILTER_FACTORY));
 
-      hepPgmBldr.addRuleInstance(ReduceExpressionsRule.JOIN_INSTANCE);
-      hepPgmBldr.addRuleInstance(ReduceExpressionsRule.FILTER_INSTANCE);
-      hepPgmBldr.addRuleInstance(ReduceExpressionsRule.PROJECT_INSTANCE);
       hepPgmBldr.addRuleInstance(ProjectRemoveRule.INSTANCE);
       hepPgmBldr.addRuleInstance(UnionMergeRule.INSTANCE);
       hepPgmBldr.addRuleInstance(new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY));
@@ -952,9 +953,12 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
      * original plan
      * @param mdProvider
      *          meta data provider
+     * @param executorProvider
+     *          executor
      * @return
      */
-    private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProvider mdProvider) {
+    private RelNode applyPreJoinOrderingTransforms(RelNode basePlan,
+        RelMetadataProvider mdProvider, Executor executorProvider) {
 
       // TODO: Decorelation of subquery should be done before attempting
       // Partition Pruning; otherwise Expression evaluation may try to execute
@@ -980,10 +984,16 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv
       }
 
       // 3. Constant propagation, common filter extraction, and PPD
-      basePlan = hepPlan(basePlan, true, mdProvider,
-          ReduceExpressionsRule.PROJECT_INSTANCE,
-          ReduceExpressionsRule.FILTER_INSTANCE,
-          ReduceExpressionsRule.JOIN_INSTANCE,
+      // Note: we need to run Projection Pruning for HiveReduceExpressionsRule first.
+      HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
+          HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY,
+          HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY,
+          HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
+      basePlan = fieldTrimmer.trim(basePlan);
+      basePlan = hepPlan(basePlan, true, mdProvider, executorProvider,
+          HiveReduceExpressionsRule.PROJECT_INSTANCE,
+          HiveReduceExpressionsRule.FILTER_INSTANCE,
+          HiveReduceExpressionsRule.JOIN_INSTANCE,
           HivePreFilteringRule.INSTANCE,
           new HiveFilterProjectTransposeRule(Filter.class, HiveFilter.DEFAULT_FILTER_FACTORY,
               HiveProject.class, HiveProject.DEFAULT_PROJECT_FACTORY),
@@ -999,10 +1009,6 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv
           new HivePartitionPruneRule(conf));
 
       // 5. Projection Pruning
-      HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
-          HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY,
-          HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY,
-          HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
       basePlan = fieldTrimmer.trim(basePlan);
 
       // 6. Rerun PPD through Project as column pruning would have introduced DT
@@ -1025,7 +1031,23 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv
      */
     private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
         RelMetadataProvider mdProvider, RelOptRule... rules) {
-      return hepPlan(basePlan, followPlanChanges, mdProvider,
+      return hepPlan(basePlan, followPlanChanges, mdProvider, null,
+          HepMatchOrder.TOP_DOWN, rules);
+    }
+
+    /**
+     * Run the HEP Planner with the given rule set.
+     *
+     * @param basePlan
+     * @param followPlanChanges
+     * @param mdProvider
+     * @param executorProvider
+     * @param rules
+     * @return optimized RelNode
+     */
+    private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
+        RelMetadataProvider mdProvider, Executor executorProvider, RelOptRule... rules) {
+      return hepPlan(basePlan, followPlanChanges, mdProvider, executorProvider,
           HepMatchOrder.TOP_DOWN, rules);
     }
@@ -1039,8 +1061,8 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadata
      * @param rules
      * @return optimized RelNode
      */
-    private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadataProvider mdProvider,
-        HepMatchOrder order, RelOptRule... rules) {
+    private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
+        RelMetadataProvider mdProvider, HepMatchOrder order, RelOptRule... rules) {
 
       RelNode optimizedRelNode = basePlan;
       HepProgramBuilder programBuilder = new HepProgramBuilder();
@@ -1063,10 +1085,52 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadata
       basePlan.getCluster().setMetadataProvider(
           new CachingRelMetadataProvider(chainedProvider, planner));
 
-      // Executor is required for constant-reduction rules; see [CALCITE-566]
-      final RexExecutorImpl executor =
-          new RexExecutorImpl(Schemas.createDataContext(null));
-      basePlan.getCluster().getPlanner().setExecutor(executor);
+      planner.setRoot(basePlan);
+      optimizedRelNode = planner.findBestExp();
+
+      return optimizedRelNode;
+    }
+
+    /**
+     * Run the HEP Planner with the given rule set.
+     *
+     * @param basePlan
+     * @param followPlanChanges
+     * @param mdProvider
+     * @param executorProvider
+     * @param order
+     * @param rules
+     * @return optimized RelNode
+     */
+    private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
+        RelMetadataProvider mdProvider, Executor executorProvider, HepMatchOrder order,
+        RelOptRule... rules) {
+
+      RelNode optimizedRelNode = basePlan;
+      HepProgramBuilder programBuilder = new HepProgramBuilder();
+      if (followPlanChanges) {
+        programBuilder.addMatchOrder(order);
+        programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules));
+      } else {
+        // TODO: Should this be also TOP_DOWN?
+        for (RelOptRule r : rules)
+          programBuilder.addRuleInstance(r);
+      }
+
+      HiveRulesRegistry registry = new HiveRulesRegistry();
+      HiveHepPlannerContext context = new HiveHepPlannerContext(registry);
+      HepPlanner planner = new HepPlanner(programBuilder.build(), context);
+
+      List<RelMetadataProvider> list = Lists.newArrayList();
+      list.add(mdProvider);
+      planner.registerMetadataProviders(list);
+      RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
+      basePlan.getCluster().setMetadataProvider(
+          new CachingRelMetadataProvider(chainedProvider, planner));
+
+      if (executorProvider != null) {
+        basePlan.getCluster().getPlanner().setExecutor(executorProvider);
+      }
 
       planner.setRoot(basePlan);
       optimizedRelNode = planner.findBestExp();
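For orientation, here is a condensed, hypothetical driver doing what the new hepPlan() overload does for the constant-folding rules; the class and method names are illustrative only:

    import org.apache.calcite.plan.hep.HepPlanner;
    import org.apache.calcite.plan.hep.HepProgramBuilder;
    import org.apache.calcite.rel.RelNode;

    import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl;
    import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsRule;

    public class ReduceExpressionsDriverSketch {
      public static RelNode fold(RelNode basePlan) {
        HepProgramBuilder builder = new HepProgramBuilder();
        builder.addRuleInstance(HiveReduceExpressionsRule.PROJECT_INSTANCE);
        builder.addRuleInstance(HiveReduceExpressionsRule.FILTER_INSTANCE);
        builder.addRuleInstance(HiveReduceExpressionsRule.JOIN_INSTANCE);

        // Without this, reduceExpressions() bails out early, because
        // the cluster's planner has no executor attached.
        basePlan.getCluster().getPlanner().setExecutor(
            new HiveRexExecutorImpl(basePlan.getCluster()));

        HepPlanner planner = new HepPlanner(builder.build());
        planner.setRoot(basePlan);
        return planner.findBestExp();
      }
    }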
diff --git a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
index fc4f294..788a17e 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
@@ -120,7 +120,7 @@ STAGE PLANS:
             alias: over1k
             Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53))) (type: boolean)
+              predicate: ((((si = 2) or (si = 3) or (si = 4) or (si = 5) or (si = 6) or (si = 7) or (si = 8) or (si = 10) or (si = 11) or (si = 12) or (si = 13) or (si = 14) or (si = 15) or (si = 16) or (si = 17) or (si = 18) or (si = 28) or (si = 38) or (si = 48) or (si = 53)) and ((t = 1) or (t = 2) or (t = 3) or (t = 4) or (t = 5) or (t = 6) or (t = 7) or (t = 9) or (t = 10) or (t = 11) or (t = 12) or (t = 13)
or (t = 14) or (t = 15) or (t = 16) or (t = 17) or (t = 27) or (t = 37) or (t = 47) or (t = 52))) and (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53)))) (type: boolean) Statistics: Num rows: 280 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 280 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE @@ -209,7 +209,7 @@ STAGE PLANS: alias: over1k Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53))) (type: boolean) + predicate: ((((si = 2) or (si = 3) or (si = 4) or (si = 5) or (si = 6) or (si = 7) or (si = 8) or (si = 10) or (si = 11) or (si = 12) or (si = 13) or (si = 14) or (si = 15) or (si = 16) or (si = 17) or (si = 18) or (si = 28) or (si = 38) or (si = 48) or (si = 53)) and ((t = 1) or (t = 2) or (t = 3) or (t = 4) or (t = 5) or (t = 6) or (t = 7) or (t = 9) or (t = 10) or (t = 11) or (t = 12) or (t = 13) or (t = 14) or (t = 15) or (t = 16) or (t = 17) or (t = 27) or (t = 37) or (t = 47) or (t = 52))) and (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53)))) (type: boolean) Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out index c864c04..f6c9b02 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out @@ -419,12 +419,12 @@ STAGE PLANS: Select Operator expressions: ss_store_sk (type: int) outputColumnNames: _col0 - Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 
(type: int) - Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE TableScan alias: s Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: PARTIAL @@ -447,10 +447,10 @@ STAGE PLANS: keys: 0 _col0 (type: int) 1 _col0 (type: int) - outputColumnNames: _col2 + outputColumnNames: _col1 Statistics: Num rows: 131 Data size: 524 Basic stats: COMPLETE Column stats: PARTIAL Select Operator - expressions: _col2 (type: int) + expressions: _col1 (type: int) outputColumnNames: _col0 Statistics: Num rows: 131 Data size: 524 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator @@ -503,12 +503,12 @@ STAGE PLANS: Select Operator expressions: s_store_sk (type: int) outputColumnNames: _col0 - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Join Operator condition map: @@ -557,12 +557,12 @@ STAGE PLANS: Select Operator expressions: ss_store_sk (type: int) outputColumnNames: _col0 - Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE TableScan alias: s Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE @@ -585,10 +585,10 @@ STAGE PLANS: keys: 0 _col0 (type: int) 1 _col0 (type: int) - outputColumnNames: _col2 + outputColumnNames: _col1 Statistics: Num rows: 321 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col2 (type: int) + expressions: _col1 (type: int) outputColumnNames: _col0 Statistics: Num rows: 321 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -813,12 +813,12 @@ STAGE PLANS: Select Operator expressions: s_store_sk (type: int) outputColumnNames: _col0 - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE TableScan alias: s Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE @@ -884,12 +884,12 @@ STAGE PLANS: Select Operator expressions: ss_store_sk (type: int) outputColumnNames: _col0 - Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: 
_col0 (type: int) - Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE TableScan alias: s Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE @@ -929,10 +929,10 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) 2 _col0 (type: int) - outputColumnNames: _col2 + outputColumnNames: _col1 Statistics: Num rows: 273 Data size: 1092 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col2 (type: int) + expressions: _col1 (type: int) outputColumnNames: _col0 Statistics: Num rows: 273 Data size: 1092 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join14.q.out b/ql/src/test/results/clientpositive/auto_join14.q.out index 47e1724..5130e46 100644 --- a/ql/src/test/results/clientpositive/auto_join14.q.out +++ b/ql/src/test/results/clientpositive/auto_join14.q.out @@ -67,10 +67,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col3) (type: int), _col1 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join19.q.out b/ql/src/test/results/clientpositive/auto_join19.q.out index 8a57cb0..616aa77 100644 --- a/ql/src/test/results/clientpositive/auto_join19.q.out +++ b/ql/src/test/results/clientpositive/auto_join19.q.out @@ -65,10 +65,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col2 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col2 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join32.q.out b/ql/src/test/results/clientpositive/auto_join32.q.out index 161ab6b..823e25f 100644 --- a/ql/src/test/results/clientpositive/auto_join32.q.out +++ b/ql/src/test/results/clientpositive/auto_join32.q.out @@ -408,13 +408,13 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 Select Operator - expressions: _col3 (type: string), _col1 (type: string) - outputColumnNames: _col3, _col1 + expressions: _col2 (type: string), _col1 (type: string) + outputColumnNames: _col2, _col1 Group By Operator aggregations: count(DISTINCT _col1) - keys: _col3 (type: string), _col1 (type: string) + keys: _col2 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join9.q.out b/ql/src/test/results/clientpositive/auto_join9.q.out index 13dd5de..5bc63ec 100644 --- a/ql/src/test/results/clientpositive/auto_join9.q.out +++ b/ql/src/test/results/clientpositive/auto_join9.q.out @@ -24,20 +24,20 @@ STAGE PLANS: Stage: Stage-5 Map Reduce Local Work Alias -> Map Local 
Tables: - $hdt$_0:src1 + $hdt$_0:src2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:src1 + $hdt$_0:src2 TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: @@ -48,14 +48,14 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -63,10 +63,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/cast1.q.out b/ql/src/test/results/clientpositive/cast1.q.out index 0bdecba..48a0c14 100644 --- a/ql/src/test/results/clientpositive/cast1.q.out +++ b/ql/src/test/results/clientpositive/cast1.q.out @@ -105,11 +105,11 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [] +POSTHOOK: Lineage: dest1.c1 SIMPLE [] +POSTHOOK: Lineage: dest1.c2 SIMPLE [] +POSTHOOK: Lineage: dest1.c3 SIMPLE [] +POSTHOOK: Lineage: dest1.c4 SIMPLE [] +POSTHOOK: Lineage: dest1.c5 SIMPLE [] POSTHOOK: Lineage: dest1.c6 EXPRESSION [] POSTHOOK: Lineage: dest1.c7 EXPRESSION [] PREHOOK: query: select dest1.* FROM dest1 diff --git a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out index 41f3d09..396a90e 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out @@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]} 
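(Note on the `cbo_rp_lineage2.q.out` hunk above: the removed lineage line records the projection edge as the unfolded expression `(3 * 5)`, while the added line that follows carries the reduced literal `15`; the `cast1.q.out` lineage entries flip from `EXPRESSION` to `SIMPLE` for the same reason, since once `3 + 2` folds to a constant there is no expression left to attribute.) A minimal sketch of the reduction contract involved, assuming a Calcite `RexBuilder` and a `RelOptPlanner.Executor` (for example the `HiveRexExecutorImpl` this patch adds) are already in scope; the class and method names below are illustrative only:

```java
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;

import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

import com.google.common.collect.ImmutableList;

public final class ConstantFoldingSketch {
  // Builds 3 * 5 and asks the executor to reduce it; for a constant input
  // the output list holds a single literal RexNode (15), which is what the
  // lineage edge now prints instead of "(3 * 5)".
  static RexNode foldProduct(RexBuilder rexBuilder, RelOptPlanner.Executor executor) {
    RexNode three = rexBuilder.makeExactLiteral(BigDecimal.valueOf(3));
    RexNode five = rexBuilder.makeExactLiteral(BigDecimal.valueOf(5));
    RexNode product = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, three, five);

    List<RexNode> reducedValues = new ArrayList<>();
    executor.reduce(rexBuilder, ImmutableList.<RexNode>of(product), reducedValues);
    return reducedValues.get(0);
  }
}
```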
+{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"15","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]} 15 15 15 diff --git a/ql/src/test/results/clientpositive/create_genericudf.q.out b/ql/src/test/results/clientpositive/create_genericudf.q.out index 586f0ba..db3a9b5 100644 --- a/ql/src/test/results/clientpositive/create_genericudf.q.out +++ b/ql/src/test/results/clientpositive/create_genericudf.q.out @@ -50,13 +50,13 @@ SELECT POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [] +POSTHOOK: Lineage: dest1.c1 SIMPLE [] +POSTHOOK: Lineage: dest1.c2 SIMPLE [] POSTHOOK: Lineage: dest1.c3 EXPRESSION [] POSTHOOK: Lineage: dest1.c4 EXPRESSION [] POSTHOOK: Lineage: dest1.c5 EXPRESSION [] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [] +POSTHOOK: Lineage: dest1.c6 SIMPLE [] +POSTHOOK: Lineage: dest1.c7 SIMPLE [] PREHOOK: query: SELECT dest1.* FROM dest1 LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 diff --git a/ql/src/test/results/clientpositive/cte_2.q.out b/ql/src/test/results/clientpositive/cte_2.q.out index a8bc760..d6923ba 100644 --- a/ql/src/test/results/clientpositive/cte_2.q.out +++ b/ql/src/test/results/clientpositive/cte_2.q.out @@ -40,7 +40,7 @@ select * POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@s1 -POSTHOOK: Lineage: s1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: s1.key SIMPLE [] POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select * from s1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out index 8fa0a4c..f5a4d61 100644 --- a/ql/src/test/results/clientpositive/explain_logical.q.out +++ b/ql/src/test/results/clientpositive/explain_logical.q.out @@ -380,10 +380,10 @@ $hdt$_0:s2 keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE Select Operator (SEL_10) - expressions: _col3 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE File Output Operator (FS_11) @@ -415,7 +415,7 @@ $hdt$_1:s1 keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE PREHOOK: query: -- With views diff --git a/ql/src/test/results/clientpositive/flatten_and_or.q.out b/ql/src/test/results/clientpositive/flatten_and_or.q.out index 9c51ff3..c1b4302 100644 --- a/ql/src/test/results/clientpositive/flatten_and_or.q.out +++ b/ql/src/test/results/clientpositive/flatten_and_or.q.out @@ -44,7 +44,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') 
and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3'))) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3')))) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out index 56c5176..19f574e 100644 --- a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out @@ -1619,38 +1619,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col3) > 15.0) and (UDFToDouble(_col3) < 25.0)) (type: boolean) + predicate: ((UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out index 6164a26..e3e4a50 100644 --- a/ql/src/test/results/clientpositive/groupby_ppd.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out @@ -28,16 +28,16 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: foo (type: int) - outputColumnNames: _col1 + outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Union Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 1 (type: int), _col1 (type: int) - outputColumnNames: _col0, _col1 + expressions: _col0 (type: int) + outputColumnNames: _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int) + keys: 1 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -54,16 +54,16 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: foo (type: int) - outputColumnNames: _col1 + outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Union Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 1 (type: int), _col1 (type: int) - outputColumnNames: _col0, _col1 + expressions: _col0 (type: int) + outputColumnNames: _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int) + keys: 1 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -79,7 +79,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col1 (type: int), _col0 (type: int) + expressions: _col1 (type: int), 1 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/input42.q.out b/ql/src/test/results/clientpositive/input42.q.out index 2974159..7658dc7 100644 --- a/ql/src/test/results/clientpositive/input42.q.out +++ b/ql/src/test/results/clientpositive/input42.q.out @@ -1724,14 +1724,53 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-0 is a root stage + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Partition Description: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: (rand(100) < 0.1) (type: boolean) + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 333 Data 
size: 3537 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### Partition + base file name: hr=11 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -1775,7 +1814,9 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.srcpart name: default.srcpart +#### A masked pattern was here #### Partition + base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -1819,20 +1860,15 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.srcpart name: default.srcpart + Truncated Path -> Alias: + /srcpart/ds=2008-04-08/hr=11 [a] + /srcpart/ds=2008-04-08/hr=12 [a] + + Stage: Stage-0 + Fetch Operator + limit: -1 Processor Tree: - TableScan - alias: a - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: (rand(100) < 0.1) (type: boolean) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE - ListSink + ListSink PREHOOK: query: select * from srcpart a where a.ds='2008-04-08' and rand(100) < 0.1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/input_part6.q.out b/ql/src/test/results/clientpositive/input_part6.q.out index fa51cdf..c01d8af 100644 --- a/ql/src/test/results/clientpositive/input_part6.q.out +++ b/ql/src/test/results/clientpositive/input_part6.q.out @@ -19,7 +19,7 @@ STAGE PLANS: predicate: (UDFToDouble(ds) = 1996.0) (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), '1996' (type: string), hr (type: string) + expressions: key (type: string), value (type: string), '1996.0' (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/insert1.q.out b/ql/src/test/results/clientpositive/insert1.q.out index 49dd2d5..7a2c429 100644 --- a/ql/src/test/results/clientpositive/insert1.q.out +++ b/ql/src/test/results/clientpositive/insert1.q.out @@ -26,7 +26,7 @@
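(Note on the `input_part6.q.out` hunk above: the partition filter is evaluated as `UDFToDouble(ds) = 1996.0`, so the folded constant projected back out has round-tripped through `double`, which is where the `'1996.0'` string comes from.) A self-contained illustration of that numeric round trip in plain Java; Hive's actual rendering goes through its UDF and constant-descriptor machinery, so this only models the visible effect:

```java
public final class DoubleRoundTrip {
  public static void main(String[] args) {
    // The string partition value is widened to double for the comparison...
    double asDouble = Double.parseDouble("1996");
    // ...and rendering the folded constant from the double yields "1996.0".
    System.out.println(String.valueOf(asDouble)); // prints 1996.0
  }
}
```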
POSTHOOK: query: insert overwrite table insert1 select a.key, a.value from inser POSTHOOK: type: QUERY POSTHOOK: Input: default@insert2 POSTHOOK: Output: default@insert1 -POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.key SIMPLE [] POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain insert into table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1) PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/join14.q.out b/ql/src/test/results/clientpositive/join14.q.out index 8b1d399..daf5766 100644 --- a/ql/src/test/results/clientpositive/join14.q.out +++ b/ql/src/test/results/clientpositive/join14.q.out @@ -67,10 +67,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col3) (type: int), _col1 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join28.q.out b/ql/src/test/results/clientpositive/join28.q.out index d748495..681d10f 100644 --- a/ql/src/test/results/clientpositive/join28.q.out +++ b/ql/src/test/results/clientpositive/join28.q.out @@ -109,10 +109,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out index afb373d..149a364 100644 --- a/ql/src/test/results/clientpositive/join32.q.out +++ b/ql/src/test/results/clientpositive/join32.q.out @@ -131,7 +131,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 $hdt$_2:x @@ -174,20 +174,20 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 Position of Big Table: 0 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join32_lessSize.q.out 
b/ql/src/test/results/clientpositive/join32_lessSize.q.out index 5ea4024..4c07c8b 100644 --- a/ql/src/test/results/clientpositive/join32_lessSize.q.out +++ b/ql/src/test/results/clientpositive/join32_lessSize.q.out @@ -163,7 +163,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 Position of Big Table: 0 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -175,7 +175,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3 + columns _col0,_col1 columns.types string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -303,7 +303,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 @@ -316,13 +316,13 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -362,7 +362,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3 + columns _col0,_col1 columns.types string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -371,7 +371,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3 + columns _col0,_col1 columns.types string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1566,11 +1566,11 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 Position of Big Table: 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2556,10 +2556,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: 
COMPLETE Column stats: NONE File Output Operator @@ -2813,10 +2813,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out index afb373d..149a364 100644 --- a/ql/src/test/results/clientpositive/join33.q.out +++ b/ql/src/test/results/clientpositive/join33.q.out @@ -131,7 +131,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 $hdt$_2:x @@ -174,20 +174,20 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 Position of Big Table: 0 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join9.q.out b/ql/src/test/results/clientpositive/join9.q.out index 8421036..9af7e80 100644 --- a/ql/src/test/results/clientpositive/join9.q.out +++ b/ql/src/test/results/clientpositive/join9.q.out @@ -82,7 +82,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -90,8 +90,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -99,9 +99,10 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 0 + value expressions: _col1 (type: string) auto parallelism: false TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -109,8 +110,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: 
string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -118,7 +119,6 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 1 - value expressions: _col1 (type: string) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -214,8 +214,8 @@ STAGE PLANS: name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /src [$hdt$_1:src2] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src1] + /src [$hdt$_0:src2] + /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:src1] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -224,10 +224,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out index aed41b0..b45a140 100644 --- a/ql/src/test/results/clientpositive/lineage2.q.out +++ b/ql/src/test/results/clientpositive/lineage2.q.out @@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"15","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]} 15 15 15 diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out index fb5e9df..2b2fa89 100644 --- a/ql/src/test/results/clientpositive/lineage3.q.out +++ b/ql/src/test/results/clientpositive/lineage3.q.out @@ -166,7 +166,7 @@ where key in (select key+18 from src1) order by key PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]} 
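(Note on the `lineage3.q.out` hunk above: the removed line reports a PREDICATE edge for the constant comparison `(1 = 1)` that the subquery rewrite leaves behind; in the added line that follows, the tautology has been folded away and the edge is gone, and the same `(1 = 1)` edges drop out of the NOT IN and NOT EXISTS cases further down.) A short sketch, under the same assumptions as the earlier example, of how such a condition collapses:

```java
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;

import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

import com.google.common.collect.ImmutableList;

public final class TautologySketch {
  // Reduces 1 = 1; a filter whose condition reduces to the literal TRUE
  // carries no information, so no PREDICATE edge is emitted for it.
  static boolean foldsToTrue(RexBuilder rexBuilder, RelOptPlanner.Executor executor) {
    RexNode one = rexBuilder.makeExactLiteral(BigDecimal.ONE);
    RexNode oneEqualsOne = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, one, one);

    List<RexNode> reduced = new ArrayList<>();
    executor.reduce(rexBuilder, ImmutableList.<RexNode>of(oneEqualsOne), reduced);
    return reduced.get(0).isAlwaysTrue();
  }
}
```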
+{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]} 146 val_146 273 val_273 PREHOOK: query: select * from src1 a @@ -178,15 +178,15 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 311 val_311 -Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select key, value from src1 where key not in (select key+18 from src1) order by key PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 
1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]} PREHOOK: query: select * from src1 a where not exists (select cint from alltypesorc b @@ -196,7 +196,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 
UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 369 401 val_401 406 val_406 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out index 7f32108..c00e873 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out @@ -413,14 +413,99 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-0 is a root stage + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Partition Description: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: fact_daily + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: ((((value = 'val_484') or (value = 'val_238')) and ((key = '484') or (key = '238'))) and (((key = '484') and (value = 'val_484')) or ((key = '238') and (value = 'val_238')))) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: value=val_238 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 1 + hr 4 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.fact_daily + numFiles 3 + numRows 500 + partition_columns ds/hr + partition_columns.types string:string + rawDataSize 5312 + serialization.ddl struct fact_daily { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.fact_daily + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct fact_daily { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.fact_daily + name: default.fact_daily +#### A masked pattern was here #### Partition + base file name: value=val_484 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -464,20 +549,15 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.fact_daily name: default.fact_daily + Truncated Path -> Alias: + /fact_daily/ds=1/hr=4/key=238/value=val_238 [$hdt$_0:fact_daily] + /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily] + + Stage: Stage-0 + Fetch Operator + limit: -1 Processor Tree: - TableScan - alias: fact_daily - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: (((key = '484') and (value = 'val_484')) or ((key = '238') and (value = 'val_238'))) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - ListSink + ListSink PREHOOK: query: -- List Bucketing Query SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out index d46b0ae..273234f 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out @@ -690,12 +690,10 @@ STAGE PLANS: predicate: (x = 484) (type: boolean) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 484 (type: int) - outputColumnNames: _col0 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col0 (type: int) + keys: 484 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/literal_decimal.q.out b/ql/src/test/results/clientpositive/literal_decimal.q.out index eddc1a4..0b6299b 100644 --- a/ql/src/test/results/clientpositive/literal_decimal.q.out +++ b/ql/src/test/results/clientpositive/literal_decimal.q.out @@ -14,12 +14,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: -1 (type: decimal(1,0)), 0 (type: decimal(1,0)), 1 (type: decimal(1,0)), 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 99999999999999999 (type: 
decimal(17,0)), 99999999999999999.9999999999999 (type: decimal(30,13)), null (type: decimal(1,0)) + expressions: -1 (type: int), 0 (type: int), 1 (type: int), 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 99999999999999999 (type: bigint), 99999999999999999.9999999999999 (type: decimal(30,13)), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 500 Data size: 392000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 784 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1 diff --git a/ql/src/test/results/clientpositive/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/louter_join_ppr.q.out index 65fe291..87130c6 100644 --- a/ql/src/test/results/clientpositive/louter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/louter_join_ppr.q.out @@ -968,42 +968,42 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: b - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false - predicate: (((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) and key is not null) (type: boolean) - Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + predicate: (((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) and key is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE tag: 0 value expressions: _col1 (type: string) auto parallelism: false TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + alias: b + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false - predicate: (((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) and key is not null) (type: boolean) - Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE + predicate: (((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) and key is not null) (type: boolean) + Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num 
rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE tag: 1 value expressions: _col1 (type: string) auto parallelism: false @@ -1147,9 +1147,9 @@ STAGE PLANS: name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /src [$hdt$_1:$hdt$_1:a] - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:b] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:b] + /src [$hdt$_0:$hdt$_0:a] + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:b] + /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:$hdt$_1:b] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -1158,34 +1158,30 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col3 (type: string), _col4 (type: string), _col0 (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -1532,38 +1528,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col3) > 15.0) and (UDFToDouble(_col3) < 25.0)) (type: boolean) + predicate: ((UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column 
stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/mapjoin_subquery.q.out b/ql/src/test/results/clientpositive/mapjoin_subquery.q.out index 1f7a5f4..8a54caa 100644 --- a/ql/src/test/results/clientpositive/mapjoin_subquery.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_subquery.q.out @@ -96,10 +96,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -339,10 +339,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out index 6a528dd..bbbd4dd 100644 --- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out @@ -768,22 +768,22 @@ STAGE PLANS: alias: orc_pred Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE 
Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col3 (type: string) sort order: - - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE @@ -833,25 +833,25 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_pred - filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) + filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col3 (type: string) sort order: - - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: 
Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out index 980b65b..00f9066 100644 --- a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out @@ -756,22 +756,22 @@ STAGE PLANS: alias: tbl_pred Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col3 (type: string) sort order: - - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 33 Basic stats: COMPLETE Column stats: NONE @@ -821,25 +821,25 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_pred - filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) + filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col3 
(type: string) sort order: - - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 33 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out index 7ba9f87..566d7f4 100644 --- a/ql/src/test/results/clientpositive/pcr.q.out +++ b/ql/src/test/results/clientpositive/pcr.q.out @@ -1804,8 +1804,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -2010,8 +2010,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -2301,9 +2301,9 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] - /pcr_t1/ds=2000-04-10 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -2484,7 +2484,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2))) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2)))) (type: boolean) Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds (type: string) @@ -2590,8 +2590,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -3566,10 +3566,10 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] - /pcr_t1/ds=2000-04-10 [pcr_t1] - /pcr_t1/ds=2000-04-11 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-11 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -3885,9 +3885,9 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] - /pcr_t1/ds=2000-04-10 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] + 
/pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -5569,8 +5569,8 @@ STAGE PLANS: name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [srcpart] - /srcpart/ds=2008-04-08/hr=12 [srcpart] + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart] + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart] Needs Tagging: false Reduce Operator Tree: Select Operator diff --git a/ql/src/test/results/clientpositive/pointlookup.q.out b/ql/src/test/results/clientpositive/pointlookup.q.out index a99b388..21bd22d 100644 --- a/ql/src/test/results/clientpositive/pointlookup.q.out +++ b/ql/src/test/results/clientpositive/pointlookup.q.out @@ -44,7 +44,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3'))) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3')))) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -110,7 +110,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3'))) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -176,7 +176,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const 
struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3'))) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/pointlookup2.q.out b/ql/src/test/results/clientpositive/pointlookup2.q.out index d677327..eb8ffe3 100644 --- a/ql/src/test/results/clientpositive/pointlookup2.q.out +++ b/ql/src/test/results/clientpositive/pointlookup2.q.out @@ -68,7 +68,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_t1 POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 POSTHOOK: Output: default@pcr_t2 -POSTHOOK: Lineage: pcr_t2.ds SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: pcr_t2.ds SIMPLE [] POSTHOOK: Lineage: pcr_t2.key SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: from pcr_t1 @@ -83,8 +83,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_t1 POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 POSTHOOK: Output: default@pcr_t2 -POSTHOOK: Lineage: pcr_t2.ds SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: pcr_t2.key SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: pcr_t2.ds SIMPLE [] +POSTHOOK: Lineage: pcr_t2.key SIMPLE [] POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain extended select key, value, ds @@ -165,7 +165,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09'))) (type: boolean) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds (type: string) @@ -271,8 +271,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator diff --git a/ql/src/test/results/clientpositive/pointlookup3.q.out b/ql/src/test/results/clientpositive/pointlookup3.q.out index 4cb3cba..fb18e74 100644 --- a/ql/src/test/results/clientpositive/pointlookup3.q.out +++ b/ql/src/test/results/clientpositive/pointlookup3.q.out @@ -125,7 +125,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (struct(ds1,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean) + predicate: (((key = 1) or 
(key = 2)) and (struct(key,ds1)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09'))) (type: boolean) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string) @@ -233,8 +233,8 @@ name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1] + /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -365,7 +365,7 @@ GatherStats: false Filter Operator isSamplingPred: false - predicate: (key = 1) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (key = 1)) (type: boolean) Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), ds1 (type: string) @@ -427,7 +427,7 @@ name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] + /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator
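Note on the pointlookup diffs above: the rewritten plans all follow one pattern. The original multi-column predicate (the OR of ANDs, or the struct IN) is kept, and redundant per-column disjunctions such as ((value = '8') or ...) and ((key = '0') or ...) are prepended. The conjunction stays logically equivalent, but the single-column factors can be evaluated by scan-level readers and statistics estimators that cannot handle a struct IN. The sketch below is a minimal illustration of that factoring over string tuples; it is not code from this patch.

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.StringJoiner;

/**
 * Illustrative sketch (not Hive's actual code): given the tuples of a
 * multi-column point lookup such as (key, value) IN (('0','8'), ('1','5'), ...),
 * derive the redundant per-column disjunctions shown in the plans above,
 * e.g. ((value = '8') or (value = '5') or ...) and ((key = '0') or ...).
 */
public class PointLookupFactoring {

  static String columnFactor(String column, List<String[]> tuples, int idx) {
    Set<String> distinct = new LinkedHashSet<>();   // keep first-seen order, drop duplicates
    for (String[] tuple : tuples) {
      distinct.add(tuple[idx]);
    }
    StringJoiner or = new StringJoiner(" or ", "(", ")");
    for (String v : distinct) {
      or.add("(" + column + " = '" + v + "')");
    }
    return or.toString();
  }

  public static void main(String[] args) {
    List<String[]> tuples = List.of(
        new String[]{"0", "8"}, new String[]{"1", "5"}, new String[]{"2", "6"},
        new String[]{"3", "8"}, new String[]{"4", "1"}, new String[]{"10", "3"});
    // The struct IN itself stays in the plan, so conjoining these factors
    // keeps the predicate equivalent to the original.
    System.out.println(columnFactor("value", tuples, 1));
    System.out.println(columnFactor("key", tuples, 0));
  }
}

Because every factor is already implied by the IN itself, prepending the factors can never change the query result; it only gives simpler conjuncts to push down.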
diff --git a/ql/src/test/results/clientpositive/quotedid_basic.q.out b/ql/src/test/results/clientpositive/quotedid_basic.q.out index 50c83a8..f9dc0e0 100644 --- a/ql/src/test/results/clientpositive/quotedid_basic.q.out +++ b/ql/src/test/results/clientpositive/quotedid_basic.q.out @@ -101,11 +101,11 @@ STAGE PLANS: predicate: (!@#$%^&*()_q = '1') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: x+1 (type: string), y&y (type: string), '1' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: x+1 (type: string), y&y (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: _col0 (type: string), _col1 (type: string), '1' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -156,11 +156,11 @@ STAGE PLANS: predicate: (!@#$%^&*()_q = '1') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: x+1 (type: string), y&y (type: string), '1' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: x+1 (type: string), y&y (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: _col0 (type: string), _col1 (type: string), '1' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -260,11 +260,11 @@ STAGE PLANS: predicate: (!@#$%^&*()_q = '1') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: x+1 (type: string), y&y (type: string), '1' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: x+1 (type: string), y&y (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: _col0 (type: string), _col1 (type: string), '1' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE diff --git a/ql/src/test/results/clientpositive/quotedid_partition.q.out b/ql/src/test/results/clientpositive/quotedid_partition.q.out index bc52c82..30cfce3 100644 --- a/ql/src/test/results/clientpositive/quotedid_partition.q.out +++ b/ql/src/test/results/clientpositive/quotedid_partition.q.out @@ -46,11 +46,11 @@ STAGE PLANS: predicate: (x+1 = '10') (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: '10' (type: string), y&y (type: string), 'a' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: y&y (type: string) + outputColumnNames: _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: '10' (type: string), _col1 (type: string), 'a' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
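Note on the two quotedid diffs above: they show the complementary half of constant folding. A literal that used to be projected through the Select Operator ('1', '10', 'a') is dropped from the projection and re-emitted directly as a Group By key. A minimal sketch of the bottom-up folding idea, using a toy expression tree instead of Hive's ExprNodeDesc hierarchy:

import java.util.List;
import java.util.Optional;

/**
 * Illustrative sketch (not Hive's ConstantPropagateProcFactory): fold an
 * expression bottom-up; if every child of a deterministic function is a
 * constant, the whole node can be replaced by a literal, which is how a
 * projected constant ends up inlined as a group-by key in the plans above.
 */
public class FoldExprSketch {

  interface Expr { Optional<Object> asConstant(); }

  record Literal(Object value) implements Expr {
    public Optional<Object> asConstant() { return Optional.of(value); }
  }

  record ColumnRef(String name) implements Expr {
    public Optional<Object> asConstant() { return Optional.empty(); }
  }

  /** A deterministic call folds iff all of its arguments fold. */
  record Concat(List<Expr> args) implements Expr {
    public Optional<Object> asConstant() {
      StringBuilder sb = new StringBuilder();
      for (Expr arg : args) {
        Optional<Object> c = arg.asConstant();
        if (c.isEmpty()) {
          return Optional.empty();   // one non-constant child blocks folding
        }
        sb.append(c.get());
      }
      return Optional.of(sb.toString());
    }
  }

  public static void main(String[] args) {
    Expr allConst = new Concat(List.of(new Literal("1"), new Literal("0")));
    Expr mixed = new Concat(List.of(new Literal("1"), new ColumnRef("y&y")));
    System.out.println(allConst.asConstant()); // Optional[10] -> usable as a literal key
    System.out.println(mixed.asConstant());    // Optional.empty -> stays an expression
  }
}

Only deterministic calls are safe to fold this way; a call like rand(1) never folds, which is consistent with the rand_partitionpruner3 diff that follows, where the rand filter is still evaluated at run time (now in a Map Reduce stage rather than a bare fetch).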
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out index 634e171..4db38a7 100644 --- a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out +++ b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out @@ -57,14 +57,53 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-0 is a root stage + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Partition Description: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: ((rand(1) < 0.1) and ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0))) (type: boolean) + Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### Partition + base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -108,20 +147,14 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.srcpart name: default.srcpart + Truncated Path -> Alias: + /srcpart/ds=2008-04-08/hr=12 [a] + + Stage: Stage-0 + Fetch Operator + limit: -1 Processor Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((rand(1) < 0.1) and ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0))) (type: boolean) - Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE - ListSink + ListSink PREHOOK: query: select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out index 4d3f81d..dccbe98 100644 --- a/ql/src/test/results/clientpositive/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out @@ -780,38 +780,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +
TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -1532,34 +1528,30 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out index 6106188..4b29056 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out @@ -238,17 +238,28 @@ STAGE PLANS: predicate: (key = 238) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), value (type: string) + expressions: 238 (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table2 + value expressions: _col1 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 
Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table2 Stage: Stage-0 Move Operator @@ -277,7 +288,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 POSTHOOK: Output: default@test_table2@ds=2 -POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [] POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select count(*) from test_table2 where ds = '2' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/auto_join14.q.out b/ql/src/test/results/clientpositive/spark/auto_join14.q.out index 710a316..f3fa2be 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join14.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join14.q.out @@ -70,12 +70,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 input vertices: 1 Map 2 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col3) (type: int), _col1 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/auto_join19.q.out b/ql/src/test/results/clientpositive/spark/auto_join19.q.out index 9e4fb8f..42072b9 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join19.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join19.q.out @@ -68,12 +68,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col2 input vertices: 1 Map 2 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col2 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/auto_join32.q.out b/ql/src/test/results/clientpositive/spark/auto_join32.q.out index 679dd79..98482ea 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join32.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join32.q.out @@ -444,10 +444,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col1 (type: string), _col3 (type: string) + keys: _col1 (type: string), _col2 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
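Note on the spark auto_join diffs above: the substantive change is only a renumbering. A join's output schema is the concatenation of its input schemas, numbered _col0.._colN; when constant folding lets an input project fewer columns, everything after that input shifts left, which is why references like _col3 and _col4 become _col2. A small illustration with hypothetical schemas; the actual mapping is computed by the planner, not by code in this patch.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/**
 * Illustrative sketch (not planner code): number the surviving columns of
 * each join input densely across the concatenated output schema.
 */
public class JoinColumnShift {

  /** Maps each surviving column to its position in the concatenated schema. */
  static Map<String, String> positions(List<List<String>> inputSchemas) {
    Map<String, String> out = new LinkedHashMap<>();
    int pos = 0;
    for (List<String> schema : inputSchemas) {
      for (String col : schema) {
        out.put(col, "_col" + pos++);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    // Hypothetical: before folding, the left side projects key, value and a
    // constant; afterwards the constant is gone, so the right side's value
    // shifts from _col3 to _col2, mirroring the renumbering in the plans above.
    System.out.println(positions(List.of(
        List.of("l.key", "l.value", "l.const"), List.of("r.value"))));
    System.out.println(positions(List.of(
        List.of("l.key", "l.value"), List.of("r.value"))));
  }
}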
diff --git a/ql/src/test/results/clientpositive/spark/auto_join9.q.out b/ql/src/test/results/clientpositive/spark/auto_join9.q.out index 568891b..5e0d548 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join9.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join9.q.out @@ -28,14 +28,14 @@ STAGE PLANS: Map 2 Map Operator Tree: TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: @@ -51,14 +51,14 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -66,12 +66,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col1, _col2 input vertices: 1 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out index 2c14065..27ec6fe 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out @@ -1436,7 +1436,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) @@ -1549,7 +1549,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*)
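Note on the lineage changes above and in the insert1 diff that follows: entries degrade from SIMPLE [(table)alias.FieldSchema(...)] to SIMPLE []. This appears to be a side effect of the same constant folding: once a predicate pins a column to a single value (for example a.key = -1, or the key = 238 projection seen earlier), the projected column is replaced by the literal, and the written value no longer traces back to any input column. A toy sketch of deriving such a pinned value, assuming a simplified textual predicate rather than Hive's ExprNodeDesc tree:

import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Illustrative sketch (not Hive's actual code): find the literal a column
 * is pinned to by a simple equality predicate. The toy regex ignores
 * operators like >= and quoted values; it is only meant to show the idea.
 */
public class ConstantFromEquality {

  private static final Pattern EQ = Pattern.compile("(\\w+)\\s*=\\s*(-?\\w+)");

  /** Returns the literal the column is pinned to by the predicate, if any. */
  static Optional<String> pinnedValue(String predicate, String column) {
    Matcher m = EQ.matcher(predicate);
    while (m.find()) {
      if (m.group(1).equals(column)) {
        return Optional.of(m.group(2));
      }
    }
    return Optional.empty();
  }

  public static void main(String[] args) {
    System.out.println(pinnedValue("key = -1", "key"));   // Optional[-1]: column foldable to a literal
    System.out.println(pinnedValue("key > 10", "key"));   // Optional.empty: column must stay
  }
}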
diff --git a/ql/src/test/results/clientpositive/spark/insert1.q.out b/ql/src/test/results/clientpositive/spark/insert1.q.out index e72ba16..50e8376 100644 --- a/ql/src/test/results/clientpositive/spark/insert1.q.out +++ b/ql/src/test/results/clientpositive/spark/insert1.q.out @@ -26,7 +26,7 @@ POSTHOOK: query: insert overwrite table insert1 select a.key, a.value from inser POSTHOOK: type: QUERY POSTHOOK: Input: default@insert2 POSTHOOK: Output: default@insert1 -POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.key SIMPLE [] POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain insert into table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1) PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/join14.q.out b/ql/src/test/results/clientpositive/spark/join14.q.out index 4ca1495..12fced9 100644 --- a/ql/src/test/results/clientpositive/spark/join14.q.out +++ b/ql/src/test/results/clientpositive/spark/join14.q.out @@ -75,10 +75,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col3 + outputColumnNames: _col1, _col2 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col3) (type: int), _col1 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 366 Data size: 3890 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join28.q.out b/ql/src/test/results/clientpositive/spark/join28.q.out index a8177f3..c2dc48e 100644 --- a/ql/src/test/results/clientpositive/spark/join28.q.out +++ b/ql/src/test/results/clientpositive/spark/join28.q.out @@ -114,12 +114,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator -
expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out index 937e8fc..9a19db1 100644 --- a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out +++ b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out @@ -204,7 +204,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 Local Work: @@ -283,7 +283,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 input vertices: 1 Map 2 Position of Big Table: 0 @@ -292,15 +292,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 input vertices: 1 Map 3 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1420,13 +1420,13 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 input vertices: 0 Map 1 Position of Big Table: 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2289,12 +2289,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2533,12 +2533,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File 
Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join33.q.out b/ql/src/test/results/clientpositive/spark/join33.q.out index 1c1c103..8e157f2 100644 --- a/ql/src/test/results/clientpositive/spark/join33.q.out +++ b/ql/src/test/results/clientpositive/spark/join33.q.out @@ -196,7 +196,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 Local Work: @@ -275,7 +275,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 input vertices: 1 Map 2 Position of Big Table: 0 @@ -284,15 +284,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 input vertices: 1 Map 3 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out b/ql/src/test/results/clientpositive/spark/join9.q.out index c7440da..864dcf9 100644 --- a/ql/src/test/results/clientpositive/spark/join9.q.out +++ b/ql/src/test/results/clientpositive/spark/join9.q.out @@ -87,7 +87,7 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -95,8 +95,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -104,18 +104,16 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 0 + value expressions: _col1 (type: string) auto parallelism: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: #### A masked pattern was here #### Partition - base file name: hr=12 + base file name: src input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 12 properties: COLUMN_STATS_ACCURATE true bucket_count -1 @@ -123,13 +121,11 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.srcpart + name default.src numFiles 1 numRows 500 - partition_columns ds/hr - partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} + serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -139,27 +135,30 @@ STAGE 
PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} + name default.src + numFiles 1 + numRows 500 + rawDataSize 5312 + serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart + name: default.src + name: default.src Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=12 [src1] + /src [src2] Map 3 Map Operator Tree: TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -167,8 +166,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -176,16 +175,18 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 1 - value expressions: _col1 (type: string) auto parallelism: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: #### A masked pattern was here #### Partition - base file name: src + base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 12 properties: COLUMN_STATS_ACCURATE true bucket_count -1 @@ -193,11 +194,13 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.src + name default.srcpart numFiles 1 numRows 500 + partition_columns ds/hr + partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct src { string key, string value} + serialization.ddl struct srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -207,26 +210,23 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} + name default.srcpart + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 #### A masked pattern was here #### serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src + name: default.srcpart + name: default.srcpart Truncated Path -> Alias: - /src [src2] + /srcpart/ds=2008-04-08/hr=12 [src1] Reducer 2 Needs Tagging: true Reduce Operator Tree: @@ -236,10 +236,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out index 4639b28..3421fa3 100644 --- a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out @@ -997,22 +997,22 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: b - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false - predicate: (((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) and key is not null) (type: boolean) - Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + predicate: (((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) and key is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE tag: 0 value expressions: _col1 (type: string) auto parallelism: false @@ -1021,12 +1021,9 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: hr=11 + base file name: src input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 11 properties: COLUMN_STATS_ACCURATE true bucket_count -1 @@ -1034,13 +1031,11 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.srcpart + name default.src numFiles 1 numRows 500 - partition_columns ds/hr - partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} + serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -1050,29 +1045,59 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} + name default.src + numFiles 1 + numRows 500 + rawDataSize 5312 + serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart + name: default.src + name: default.src + Truncated Path -> Alias: + /src [a] + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: (((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) and key is not null) (type: boolean) + Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE + tag: 1 + value expressions: _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: #### A masked pattern was here #### Partition - base file name: hr=12 + base file name: hr=11 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: ds 2008-04-08 - hr 12 + hr 11 properties: COLUMN_STATS_ACCURATE true bucket_count -1 @@ -1111,39 +1136,14 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.srcpart name: default.srcpart - Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [b] - /srcpart/ds=2008-04-08/hr=12 [b] - Map 3 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: (((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) and key is not null) (type: boolean) - Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE - tag: 1 - value expressions: _col1 (type: string) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: #### A masked pattern was here #### Partition - base file name: src + base file name: hr=12 input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 12 properties: COLUMN_STATS_ACCURATE true bucket_count -1 @@ -1151,11 +1151,13 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.src + name default.srcpart numFiles 1 numRows 500 + partition_columns ds/hr + partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct src { string key, string value} + serialization.ddl struct srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -1165,26 +1167,24 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} + name default.srcpart + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src + name: default.srcpart + name: default.srcpart Truncated Path -> Alias: - /src [a] + /srcpart/ds=2008-04-08/hr=11 [b] + /srcpart/ds=2008-04-08/hr=12 [b] Reducer 2 Needs Tagging: true Reduce Operator Tree: @@ -1194,34 +1194,30 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col3 (type: string), _col4 (type: string), _col0 (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + 
hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -1580,38 +1576,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col3) > 15.0) and (UDFToDouble(_col3) < 25.0)) (type: boolean) + predicate: ((UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out index d74b7d0..d1b2301 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out @@ -101,12 +101,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output 
Operator @@ -351,12 +351,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out index 3ee6e22..60965e8 100644 --- a/ql/src/test/results/clientpositive/spark/pcr.q.out +++ b/ql/src/test/results/clientpositive/spark/pcr.q.out @@ -2543,7 +2543,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2))) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2)))) (type: boolean) Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds (type: string) diff --git a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out index 6e34865..173132f 100644 --- a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out @@ -804,38 +804,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + 
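
Note on the pcr.q.out change above: the predicate gains a leading conjunct ((key = 1) or (key = 2)) in front of the original OR-of-ANDs. This looks like the standard trick of factoring, out of each OR branch, the conjuncts that do not mention the partition column, yielding a weaker condition that can be pushed and evaluated independently of ds. A minimal sketch of that extraction, under assumed mechanics (this is not Hive's code):

    import java.util.List;
    import java.util.stream.Collectors;

    public class FactorDisjunctsDemo {
        record Cond(String col, String val) {
            public String toString() { return "(" + col + " = " + val + ")"; }
        }

        /** OR together, per branch, only the conjuncts that do not touch partCol. */
        static String residualDisjunction(List<List<Cond>> orOfAnds, String partCol) {
            return orOfAnds.stream()
                .map(branch -> branch.stream()
                    .filter(c -> !c.col().equals(partCol))
                    .map(Cond::toString)
                    .collect(Collectors.joining(" and ")))
                .collect(Collectors.joining(" or ", "(", ")"));
        }

        public static void main(String[] args) {
            List<List<Cond>> pred = List.of(
                List.of(new Cond("ds", "'2000-04-08'"), new Cond("key", "1")),
                List.of(new Cond("ds", "'2000-04-09'"), new Cond("key", "2")));
            // Prints ((key = 1) or (key = 2)) -- the extra leading conjunct above.
            System.out.println(residualDisjunction(pred, "ds"));
        }
    }
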
hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -1580,34 +1576,30 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out index 3076e06..bfdd529 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out @@ -233,6 +233,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-1 Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) #### A masked pattern was here #### Vertices: Map 1 @@ -244,17 +246,29 @@ STAGE PLANS: predicate: (key = 238) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), value (type: string) + expressions: 238 (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - 
input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table2 + value expressions: _col1 (type: string) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table2 Stage: Stage-0 Move Operator @@ -283,7 +297,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 POSTHOOK: Output: default@test_table2@ds=2 -POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [] POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select count(*) from test_table2 where ds = '2' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out index babaff8..17821a8 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out @@ -150,7 +150,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -160,7 +160,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key PREHOOK: type: QUERY @@ -335,7 +335,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
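
Note on the smb_mapjoin_18.q.out change above: once the filter pins key = 238, the Select emits the literal 238 instead of reading the column, which is exactly why the lineage entry for the key column collapses to SIMPLE [] — no source column is referenced any more. A minimal sketch of that substitution and its lineage consequence (hypothetical names, not Hive's expression trees):

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class PinToConstantDemo {
        /** Expressions here are bare column names; pinned columns fold to literals. */
        static String fold(String expr, Map<String, String> pinnedByFilter,
                           Set<String> lineage) {
            String literal = pinnedByFilter.get(expr);
            if (literal != null) {
                return literal;     // emits "238"; the column is never read
            }
            lineage.add(expr);      // only unfolded columns keep a lineage edge
            return expr;
        }

        public static void main(String[] args) {
            Set<String> lineage = new HashSet<>();
            Map<String, String> pinned = Map.of("key", "238"); // from (key = 238)
            System.out.println(fold("key", pinned, lineage));   // 238
            System.out.println(fold("value", pinned, lineage)); // value
            System.out.println(lineage);  // [value] -> key's lineage is SIMPLE []
        }
    }
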
comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -345,7 +345,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=4000 PREHOOK: type: QUERY @@ -355,7 +355,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_3 -POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key full outer join smb_bucket4_3 c on a.key=c.key @@ -381,7 +381,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000 PREHOOK: type: QUERY @@ -391,7 +391,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -401,7 +401,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_3 -POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key full outer join smb_bucket4_3 c on a.key=c.key @@ -427,7 +427,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: 
default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000 PREHOOK: type: QUERY @@ -437,7 +437,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -447,7 +447,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_3 -POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key full outer join smb_bucket4_3 c on a.key=c.key diff --git a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out index e2f5269..324e8b7 100644 --- a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out +++ b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out @@ -51,4 +51,4 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@testdate POSTHOOK: Output: default@testdate POSTHOOK: Lineage: testdate.dt EXPRESSION [(testdate)testdate.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: testdate.id EXPRESSION [(testdate)testdate.FieldSchema(name:id, type:int, comment:null), ] +POSTHOOK: Lineage: testdate.id EXPRESSION [] diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out index 6308cee..c31243c 100644 --- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out @@ -201,21 +201,21 @@ STAGE PLANS: alias: lineitem Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean) + predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean) Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: l_orderkey (type: int) - outputColumnNames: _col0 + expressions: l_orderkey (type: int), l_linenumber (type: int) + outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Group By 
Operator - keys: _col0 (type: int) + keys: _col0 (type: int), _col1 (type: int) mode: hash - outputColumnNames: _col0 + outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), 1 (type: int) + 1 _col0 (type: int), _col1 (type: int) Local Work: Map Reduce Local Work @@ -239,8 +239,8 @@ STAGE PLANS: condition map: Left Semi Join 0 to 1 keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), 1 (type: int) + 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col1, _col2 input vertices: 1 Map 2 diff --git a/ql/src/test/results/clientpositive/stats_empty_partition.q.out b/ql/src/test/results/clientpositive/stats_empty_partition.q.out index c13817e..202263e 100644 --- a/ql/src/test/results/clientpositive/stats_empty_partition.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_partition.q.out @@ -20,7 +20,7 @@ POSTHOOK: query: insert overwrite table tmptable partition (part = '1') select * POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@tmptable@part=1 -POSTHOOK: Lineage: tmptable PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable PARTITION(part=1).key SIMPLE [] POSTHOOK: Lineage: tmptable PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: describe formatted tmptable partition (part = '1') PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out index 5563794..ac9c174 100644 --- a/ql/src/test/results/clientpositive/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin.q.out @@ -1,4 +1,4 @@ -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, non corr explain select * @@ -151,7 +151,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from src where src.key not in ( select key from src s1 where s1.key > '2') @@ -285,7 +285,7 @@ POSTHOOK: Input: default@src 199 val_199 199 val_199 2 val_2 -Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, corr explain select p_mfgr, b.p_name, p_size @@ -528,7 +528,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in @@ -567,7 +567,7 @@ Manufacturer#4 almond azure aquamarine papaya violet 12 Manufacturer#5 almond antique blue firebrick mint 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in 
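
Note on the vector_mapjoin_reduce.q.out change above: the semijoin now matches on a composite key whose second component is the literal 1 on side 0 (where l_linenumber = 1 was folded away) but the real l_linenumber column on side 1 (whose subquery now groups by both columns). A toy illustration of mixed literal/column join keys, with assumed shapes rather than Hive's operators:

    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;

    public class CompositeKeyDemo {
        // A key part either reads a column from the row or returns a constant.
        static Function<Map<String, Integer>, Integer> col(String name) { return r -> r.get(name); }
        static Function<Map<String, Integer>, Integer> lit(int v)       { return r -> v; }

        static List<Integer> key(Map<String, Integer> row,
                                 List<Function<Map<String, Integer>, Integer>> parts) {
            return parts.stream().map(p -> p.apply(row)).toList();
        }

        public static void main(String[] args) {
            Map<String, Integer> probe = Map.of("l_orderkey", 42);                    // side 0
            Map<String, Integer> build = Map.of("l_orderkey", 42, "l_linenumber", 1); // side 1
            // 0: _col0, 1 (literal)            1: _col0, _col1
            List<Integer> k0 = key(probe, List.of(col("l_orderkey"), lit(1)));
            List<Integer> k1 = key(build, List.of(col("l_orderkey"), col("l_linenumber")));
            System.out.println(k0.equals(k1)); // true -> the rows join
        }
    }
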
Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[41][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- agg, non corr explain select p_name, p_size @@ -843,7 +843,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[41][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: select p_name, p_size from part where part.p_size not in @@ -890,7 +890,7 @@ almond aquamarine sandy cyan gainsboro 18 almond aquamarine yellow dodger mint 7 almond azure aquamarine papaya violet 12 almond azure blanched chiffon midnight 23 -Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- agg, corr explain select p_mfgr, p_name, p_size @@ -1202,7 +1202,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) @@ -1278,7 +1278,7 @@ POSTHOOK: Input: default@lineitem 139636 1 175839 1 182052 1 -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- alternate not in syntax select * from src @@ -1442,7 +1442,7 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@t1_v POSTHOOK: Output: database:default POSTHOOK: Output: default@T2_v -Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain select * from T1_v where T1_v.key not in (select T2_v.key from T2_v) @@ -1587,7 +1587,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from T1_v where T1_v.key not in (select T2_v.key from T2_v) PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out index 9689ae3..7d23be3 100644 --- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out +++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out @@ -1,4 +1,4 @@ -Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- non agg, non corr -- JAVA_VERSION_SPECIFIC_OUTPUT @@ -188,7 +188,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- non agg, corr explain 
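
Note on the warning-number churn above (JOIN[20] -> JOIN[19], JOIN[30] -> JOIN[29], and so on): the IDs appear to drop by one simply because constant folding eliminates one operator earlier in the pipeline and the remaining operators are numbered sequentially, so every later ID shifts. A toy numbering pass illustrating the effect (an assumption about the mechanism, not Hive's actual ID assignment):

    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    public class OperatorIdDemo {
        static List<String> number(List<String> ops) {
            AtomicInteger next = new AtomicInteger();
            return ops.stream().map(o -> o + "_" + next.getAndIncrement()).toList();
        }

        public static void main(String[] args) {
            List<String> before = List.of("TS", "FIL", "SEL", "SEL", "JOIN");
            List<String> after  = List.of("TS", "FIL", "SEL", "JOIN"); // one SEL folded away
            System.out.println(number(before)); // ..., JOIN_4
            System.out.println(number(after));  // ..., JOIN_3  (every later id shifts)
        }
    }
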
select b.p_mfgr, min(p_retailprice) @@ -445,7 +445,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr @@ -470,7 +470,7 @@ POSTHOOK: Input: default@part #### A masked pattern was here #### Manufacturer#1 1173.15 Manufacturer#2 1690.68 -Warning: Shuffle Join JOIN[35][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product +Warning: Shuffle Join JOIN[34][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product PREHOOK: query: -- agg, non corr explain select b.p_mfgr, min(p_retailprice) @@ -733,7 +733,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[35][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product +Warning: Shuffle Join JOIN[34][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out index 908ad39..ede3320 100644 --- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out +++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out @@ -773,7 +773,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, corr explain select p_mfgr, b.p_name, p_size diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out index 470fa83..2afd7ff 100644 --- a/ql/src/test/results/clientpositive/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/subquery_views.q.out @@ -111,8 +111,8 @@ where `b`.`key` not in from `default`.`src` `a` where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11' ), tableType:VIRTUAL_VIEW) -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: explain select * from cv2 where cv2.key in (select key from cv2 c where c.key < '11') @@ -420,8 +420,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: select * from cv2 where cv2.key in (select key from cv2 c where c.key < '11') PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out 
b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out index 3ccc52f..3a63c2e 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out @@ -1358,7 +1358,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) @@ -1463,7 +1463,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out index 792ccaf..be1ae2f 100644 --- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out @@ -1758,7 +1758,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 #### A masked pattern was here #### 1000 -Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: -- parent is reduce tasks EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY @@ -1782,42 +1781,42 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: (ds = '2008-04-08') (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: 
Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Merge Join Operator condition map: Inner Join 0 to 1 keys: - 0 - 1 - Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + 0 _col0 (type: string) + 1 '2008-04-08' (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -1851,8 +1850,25 @@ STAGE PLANS: Select Operator Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: '2008-04-08' (type: string) + sort order: + + Map-reduce partition columns: '2008-04-08' (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '2008-04-08' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Target column: ds + Target Vertex: Map 1 Stage: Stage-0 Fetch Operator @@ -1860,18 +1876,21 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 1000 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' @@ -4095,7 +4114,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 #### A masked pattern was here #### 1000 -Warning: Map Join MAPJOIN[21][bigTable=?] 
in task 'Reducer 3' is a cross product PREHOOK: query: -- parent is reduce tasks EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY @@ -4118,33 +4136,33 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: (ds = '2008-04-08') (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 2 Map Operator Tree: TableScan alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator @@ -4158,11 +4176,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: string) + 1 '2008-04-08' (type: string) input vertices: 0 Map 1 - Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true Group By Operator aggregations: count() mode: hash @@ -4193,18 +4212,21 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[21][bigTable=?] 
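
Note on the dynamic_partition_pruning.q.out changes above: after folding, the join regains real equi-keys (_col0 against the literal '2008-04-08'), which is why the cross-product warnings disappear, and a Dynamic Partitioning Event Operator is added. Pruning thereby moves from compile time into the plan itself, which also explains why the 2008-04-09 partitions now show up as registered inputs even though they are discarded at run time. A rough sketch of that runtime step, with invented names:

    import java.util.List;
    import java.util.Set;

    public class RuntimePruningDemo {
        /** The event operator sends the distinct ds values seen on the small side. */
        static List<String> pruneTarget(List<String> registeredPartitions,
                                        Set<String> dsValuesFromEvent) {
            return registeredPartitions.stream()
                .filter(p -> dsValuesFromEvent.stream().anyMatch(p::contains))
                .toList();
        }

        public static void main(String[] args) {
            List<String> parts = List.of(
                "ds=2008-04-08/hr=11", "ds=2008-04-08/hr=12",
                "ds=2008-04-09/hr=11", "ds=2008-04-09/hr=12"); // all registered as inputs
            // Only the 2008-04-08 partitions are actually scanned:
            System.out.println(pruneTarget(parts, Set.of("ds=2008-04-08")));
        }
    }
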
in task 'Reducer 3' is a cross product PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 1000 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out index ee70033..e1e66a4 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out @@ -2588,11 +2588,11 @@ Stage-0 | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ - | Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + | Statistics:Num rows: 5 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col1 (type: int) | Select Operator [SEL_2] | outputColumnNames:["_col0","_col1"] - | Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + | Statistics:Num rows: 5 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE | Filter Operator [FIL_16] | predicate:((((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0))) and key is not null) (type: boolean) | Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE @@ -2915,7 +2915,7 @@ Stage-0 limit:-1 Select Operator [SEL_2] outputColumnNames:["_col0"] - Filter Operator [FIL_4] + Filter Operator [FIL_6] predicate:((c_int = -6) or (c_int = 6)) (type: boolean) TableScan [TS_0] alias:cbo_t1 @@ -3005,61 +3005,61 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator [FS_15] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_14] + Select Operator [SEL_13] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_17] + Filter Operator [FIL_16] predicate:_col3 is null (type: boolean) Statistics:Num rows: 1 Data size: 269 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator [MERGEJOIN_19] + Merge Join Operator [MERGEJOIN_18] | condition map:[{"":"Left Outer Join0 to 1"}] | keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"} | outputColumnNames:["_col0","_col1","_col3"] | Statistics:Num rows: 193 Data size: 51917 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_10] + | Reduce Output Operator 
[RS_9] | key expressions:_col1 (type: string) | Map-reduce partition columns:_col1 (type: string) | sort order:+ | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string) - | Select Operator [SEL_2] + | Select Operator [SEL_1] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:b | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 4 [SIMPLE_EDGE] - Reduce Output Operator [RS_11] + Reduce Output Operator [RS_10] key expressions:_col1 (type: string) Map-reduce partition columns:_col1 (type: string) sort order:+ Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_9] + Select Operator [SEL_8] outputColumnNames:["_col1"] Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_8] + Group By Operator [GBY_7] | keys:KEY._col0 (type: string), KEY._col1 (type: string) | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_7] + Reduce Output Operator [RS_6] key expressions:_col0 (type: string), _col1 (type: string) Map-reduce partition columns:_col0 (type: string), _col1 (type: string) sort order:++ Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_6] + Group By Operator [GBY_5] keys:key (type: string), value (type: string) outputColumnNames:["_col0","_col1"] Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_18] + Filter Operator [FIL_17] predicate:(value > 'val_2') (type: boolean) Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_3] + TableScan [TS_2] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE @@ -3092,57 +3092,57 @@ Stage-0 limit:-1 Stage-1 Reducer 3 - File Output Operator [FS_15] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_14] + Select Operator [SEL_13] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_17] + Filter Operator [FIL_16] predicate:_col3 is null (type: boolean) Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator [MERGEJOIN_19] + Merge Join Operator [MERGEJOIN_18] | condition map:[{"":"Left Outer Join0 to 1"}] | keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"} | outputColumnNames:["_col0","_col1","_col3"] | Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 4 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_10] | key expressions:_col0 (type: string), _col1 (type: string) | Map-reduce partition columns:_col0 (type: string), _col1 (type: string) | sort order:++ | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: 
COMPLETE - | Select Operator [SEL_9] + | Select Operator [SEL_8] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_18] + | Filter Operator [FIL_17] | predicate:(value > 'val_12') (type: boolean) | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_7] + | TableScan [TS_6] | alias:b | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 2 [SIMPLE_EDGE] - Reduce Output Operator [RS_10] + Reduce Output Operator [RS_9] key expressions:_col1 (type: string), _col0 (type: string) Map-reduce partition columns:_col1 (type: string), _col0 (type: string) sort order:++ Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_5] + Group By Operator [GBY_4] | keys:KEY._col0 (type: string), KEY._col1 (type: string) | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - Reduce Output Operator [RS_4] + Reduce Output Operator [RS_3] key expressions:_col0 (type: string), _col1 (type: string) Map-reduce partition columns:_col0 (type: string), _col1 (type: string) sort order:++ Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_3] + Group By Operator [GBY_2] keys:key (type: string), value (type: string) outputColumnNames:["_col0","_col1"] Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_2] + Select Operator [SEL_1] outputColumnNames:["key","value"] Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE TableScan [TS_0] @@ -3185,17 +3185,17 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator [FS_16] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_21] + Merge Join Operator [MERGEJOIN_19] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_9] | key expressions:_col1 (type: string), _col0 (type: string) | Map-reduce partition columns:_col1 (type: string), _col0 (type: string) | sort order:++ @@ -3203,29 +3203,29 @@ Stage-0 | Select Operator [SEL_2] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_19] + | Filter Operator [FIL_17] | predicate:((value > 'val_9') and key is not null) (type: boolean) | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:b | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_13] + Reduce Output Operator [RS_11] key expressions:_col0 (type: string), _col1 (type: string) Map-reduce partition columns:_col0 (type: 
string), _col1 (type: string) sort order:++ Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_9] + Group By Operator [GBY_7] keys:_col0 (type: string), _col1 (type: string) outputColumnNames:["_col0","_col1"] Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_7] + Select Operator [SEL_5] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_20] + Filter Operator [FIL_18] predicate:((value > 'val_9') and key is not null) (type: boolean) Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_5] + TableScan [TS_3] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE @@ -3257,17 +3257,17 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator [FS_16] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_21] + Merge Join Operator [MERGEJOIN_19] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_9] | key expressions:_col1 (type: string), _col0 (type: string) | Map-reduce partition columns:_col1 (type: string), _col0 (type: string) | sort order:++ @@ -3275,29 +3275,29 @@ Stage-0 | Select Operator [SEL_2] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_19] + | Filter Operator [FIL_17] | predicate:((value > 'val_9') and key is not null) (type: boolean) | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:b | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_13] + Reduce Output Operator [RS_11] key expressions:_col0 (type: string), _col1 (type: string) Map-reduce partition columns:_col0 (type: string), _col1 (type: string) sort order:++ Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_9] + Group By Operator [GBY_7] keys:_col0 (type: string), _col1 (type: string) outputColumnNames:["_col0","_col1"] Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_7] + Select Operator [SEL_5] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_20] + Filter Operator [FIL_18] predicate:((value > 'val_9') and key is not null) (type: boolean) Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_5] + TableScan [TS_3] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE @@ -3319,17 +3319,17 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator 
[FS_16] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_21] + Merge Join Operator [MERGEJOIN_19] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_9] | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ @@ -3338,29 +3338,29 @@ Stage-0 | Select Operator [SEL_2] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_19] + | Filter Operator [FIL_17] | predicate:(key > '9') (type: boolean) | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:src_cbo | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_13] + Reduce Output Operator [RS_11] key expressions:_col0 (type: string) Map-reduce partition columns:_col0 (type: string) sort order:+ Statistics:Num rows: 69 Data size: 6003 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_9] + Group By Operator [GBY_7] keys:_col0 (type: string) outputColumnNames:["_col0"] Statistics:Num rows: 69 Data size: 6003 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_7] + Select Operator [SEL_5] outputColumnNames:["_col0"] Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_20] + Filter Operator [FIL_18] predicate:(key > '9') (type: boolean) Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_5] + TableScan [TS_3] alias:src_cbo Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE @@ -3386,89 +3386,89 @@ Stage-0 limit:-1 Stage-1 Reducer 3 - File Output Operator [FS_27] + File Output Operator [FS_25] compressed:false Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_26] + Select Operator [SEL_24] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator [MERGEJOIN_37] + Merge Join Operator [MERGEJOIN_35] | condition map:[{"":"Inner Join 0 to 1"}] | keys:{"0":"_col1 (type: int)","1":"_col0 (type: int)"} | outputColumnNames:["_col1","_col2"] | Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 2 [SIMPLE_EDGE] - | Reduce Output Operator [RS_22] + | Reduce Output Operator [RS_20] | key expressions:_col1 (type: int) | Map-reduce partition columns:_col1 (type: int) | sort order:+ | Statistics:Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col2 (type: int) - | Merge Join Operator 
[MERGEJOIN_36] + | Merge Join Operator [MERGEJOIN_34] | | condition map:[{"":"Left Semi Join 0 to 1"}] - | | keys:{"0":"_col0 (type: int)","1":"_col0 (type: int)"} + | | keys:{"0":"_col0 (type: int), 1 (type: int)","1":"_col0 (type: int), _col1 (type: int)"} | | outputColumnNames:["_col1","_col2"] | | Statistics:Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 1 [SIMPLE_EDGE] - | | Reduce Output Operator [RS_17] - | | key expressions:_col0 (type: int) - | | Map-reduce partition columns:_col0 (type: int) - | | sort order:+ + | | Reduce Output Operator [RS_15] + | | key expressions:_col0 (type: int), 1 (type: int) + | | Map-reduce partition columns:_col0 (type: int), 1 (type: int) + | | sort order:++ | | Statistics:Num rows: 16 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE | | value expressions:_col1 (type: int), _col2 (type: int) | | Select Operator [SEL_2] | | outputColumnNames:["_col0","_col1","_col2"] | | Statistics:Num rows: 16 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE - | | Filter Operator [FIL_33] + | | Filter Operator [FIL_31] | | predicate:(((l_linenumber = 1) and l_orderkey is not null) and l_partkey is not null) (type: boolean) | | Statistics:Num rows: 16 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE | | TableScan [TS_0] | | alias:lineitem | | Statistics:Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 4 [SIMPLE_EDGE] - | Reduce Output Operator [RS_19] - | key expressions:_col0 (type: int) - | Map-reduce partition columns:_col0 (type: int) - | sort order:+ - | Statistics:Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_15] - | keys:_col0 (type: int) - | outputColumnNames:["_col0"] - | Statistics:Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + | Reduce Output Operator [RS_17] + | key expressions:_col0 (type: int), _col1 (type: int) + | Map-reduce partition columns:_col0 (type: int), _col1 (type: int) + | sort order:++ + | Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + | Group By Operator [GBY_13] + | keys:_col0 (type: int), _col1 (type: int) + | outputColumnNames:["_col0","_col1"] + | Statistics:Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE | Select Operator [SEL_5] - | outputColumnNames:["_col0"] - | Statistics:Num rows: 14 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_34] - | predicate:(((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean) + | outputColumnNames:["_col0","_col1"] + | Statistics:Num rows: 14 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + | Filter Operator [FIL_32] + | predicate:(((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean) | Statistics:Num rows: 14 Data size: 1344 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_3] | alias:lineitem | Statistics:Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 6 [SIMPLE_EDGE] - Reduce Output Operator [RS_24] + Reduce Output Operator [RS_22] key expressions:_col0 (type: int) Map-reduce partition columns:_col0 (type: int) sort order:+ Statistics:Num rows: 50 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_12] + Group By Operator [GBY_10] | keys:KEY._col0 (type: int) | outputColumnNames:["_col0"] | Statistics:Num rows: 50 Data size: 200 Basic stats: COMPLETE Column 
stats: COMPLETE |<-Map 5 [SIMPLE_EDGE] - Reduce Output Operator [RS_11] + Reduce Output Operator [RS_9] key expressions:_col0 (type: int) Map-reduce partition columns:_col0 (type: int) sort order:+ Statistics:Num rows: 50 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_10] + Group By Operator [GBY_8] keys:l_partkey (type: int) outputColumnNames:["_col0"] Statistics:Num rows: 50 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_35] + Filter Operator [FIL_33] predicate:l_partkey is not null (type: boolean) Statistics:Num rows: 100 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_8] + TableScan [TS_6] alias:lineitem Statistics:Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: COMPLETE @@ -3497,49 +3497,49 @@ Stage-0 limit:-1 Stage-1 Reducer 4 - File Output Operator [FS_37] + File Output Operator [FS_33] compressed:false Statistics:Num rows: 34 Data size: 6324 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_50] + Merge Join Operator [MERGEJOIN_46] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col2 (type: bigint)","1":"_col0 (type: bigint)"} | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 34 Data size: 6324 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 3 [SIMPLE_EDGE] - | Reduce Output Operator [RS_32] + | Reduce Output Operator [RS_28] | key expressions:_col2 (type: bigint) | Map-reduce partition columns:_col2 (type: bigint) | sort order:+ | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string), _col1 (type: string) - | Filter Operator [FIL_43] + | Filter Operator [FIL_39] | predicate:_col2 is not null (type: boolean) | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_20] + | Group By Operator [GBY_16] | | aggregations:["count(VALUE._col0)"] | | keys:KEY._col0 (type: string), KEY._col1 (type: string) | | outputColumnNames:["_col0","_col1","_col2"] | | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE | |<-Reducer 2 [SIMPLE_EDGE] - | Reduce Output Operator [RS_19] + | Reduce Output Operator [RS_15] | key expressions:_col0 (type: string), _col1 (type: string) | Map-reduce partition columns:_col0 (type: string), _col1 (type: string) | sort order:++ | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col2 (type: bigint) - | Group By Operator [GBY_18] + | Group By Operator [GBY_14] | aggregations:["count()"] | keys:_col0 (type: string), _col1 (type: string) | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE - | Merge Join Operator [MERGEJOIN_49] + | Merge Join Operator [MERGEJOIN_45] | | condition map:[{"":"Left Semi Join 0 to 1"}] | | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"} | | outputColumnNames:["_col0","_col1"] | | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 1 [SIMPLE_EDGE] - | | Reduce Output Operator [RS_13] + | | Reduce Output Operator [RS_9] | | key expressions:_col0 (type: string) | | Map-reduce partition columns:_col0 
(type: string) | | sort order:+ @@ -3548,71 +3548,71 @@ Stage-0 | | Select Operator [SEL_2] | | outputColumnNames:["_col0","_col1"] | | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | | Filter Operator [FIL_44] + | | Filter Operator [FIL_40] | | predicate:(key > '8') (type: boolean) | | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE | | TableScan [TS_0] | | alias:b | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 5 [SIMPLE_EDGE] - | Reduce Output Operator [RS_15] + | Reduce Output Operator [RS_11] | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ | Statistics:Num rows: 69 Data size: 6003 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_11] + | Group By Operator [GBY_7] | keys:_col0 (type: string) | outputColumnNames:["_col0"] | Statistics:Num rows: 69 Data size: 6003 Basic stats: COMPLETE Column stats: COMPLETE - | Select Operator [SEL_9] + | Select Operator [SEL_5] | outputColumnNames:["_col0"] | Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_45] + | Filter Operator [FIL_41] | predicate:(key > '8') (type: boolean) | Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_7] + | TableScan [TS_3] | alias:b | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 7 [SIMPLE_EDGE] - Reduce Output Operator [RS_34] + Reduce Output Operator [RS_30] key expressions:_col0 (type: bigint) Map-reduce partition columns:_col0 (type: bigint) sort order:+ Statistics:Num rows: 34 Data size: 272 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_30] + Group By Operator [GBY_26] keys:_col0 (type: bigint) outputColumnNames:["_col0"] Statistics:Num rows: 34 Data size: 272 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_28] + Select Operator [SEL_24] outputColumnNames:["_col0"] Statistics:Num rows: 69 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_46] + Filter Operator [FIL_42] predicate:_col1 is not null (type: boolean) Statistics:Num rows: 69 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_48] + Select Operator [SEL_44] outputColumnNames:["_col1"] Statistics:Num rows: 69 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_27] + Group By Operator [GBY_23] | aggregations:["count(VALUE._col0)"] | keys:KEY._col0 (type: string) | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 6 [SIMPLE_EDGE] - Reduce Output Operator [RS_26] + Reduce Output Operator [RS_22] key expressions:_col0 (type: string) Map-reduce partition columns:_col0 (type: string) sort order:+ Statistics:Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE value expressions:_col1 (type: bigint) - Group By Operator [GBY_25] + Group By Operator [GBY_21] aggregations:["count()"] keys:key (type: string) outputColumnNames:["_col0","_col1"] Statistics:Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_47] + Filter Operator [FIL_43] predicate:(key > '9') (type: boolean) Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_22] + TableScan [TS_18] alias:b 
Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE @@ -3640,78 +3640,78 @@ Stage-0 limit:-1 Stage-1 Reducer 3 - File Output Operator [FS_21] + File Output Operator [FS_20] compressed:false Statistics:Num rows: 3 Data size: 681 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_26] + Merge Join Operator [MERGEJOIN_25] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col1 (type: string)","1":"_col0 (type: string)"} | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 3 Data size: 681 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 2 [SIMPLE_EDGE] - | Reduce Output Operator [RS_16] + | Reduce Output Operator [RS_15] | key expressions:_col1 (type: string) | Map-reduce partition columns:_col1 (type: string) | sort order:+ | Statistics:Num rows: 13 Data size: 2951 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string), _col2 (type: double) - | Select Operator [SEL_6] + | Select Operator [SEL_5] | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 13 Data size: 2951 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_5] + | Group By Operator [GBY_4] | | aggregations:["avg(VALUE._col0)"] | | keys:KEY._col0 (type: string), KEY._col1 (type: string) | | outputColumnNames:["_col0","_col1","_col2"] | | Statistics:Num rows: 13 Data size: 2951 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_4] + | Reduce Output Operator [RS_3] | key expressions:_col0 (type: string), _col1 (type: string) | Map-reduce partition columns:_col0 (type: string), _col1 (type: string) | sort order:++ | Statistics:Num rows: 13 Data size: 2847 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col2 (type: struct) - | Group By Operator [GBY_3] + | Group By Operator [GBY_2] | aggregations:["avg(p_size)"] | keys:p_name (type: string), p_mfgr (type: string) | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 13 Data size: 2847 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_24] + | Filter Operator [FIL_23] | predicate:p_name is not null (type: boolean) | Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:part | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 5 [SIMPLE_EDGE] - Reduce Output Operator [RS_18] + Reduce Output Operator [RS_17] key expressions:_col0 (type: string) Map-reduce partition columns:_col0 (type: string) sort order:+ Statistics:Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_14] + Group By Operator [GBY_13] keys:_col0 (type: string) outputColumnNames:["_col0"] Statistics:Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_11] + Select Operator [SEL_10] outputColumnNames:["_col0"] Statistics:Num rows: 13 Data size: 2392 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_25] + Filter Operator [FIL_24] predicate:first_value_window_0 is not null (type: boolean) Statistics:Num rows: 13 Data size: 6383 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator [PTF_10] + PTF Operator [PTF_9] Function definitions:[{"Input 
definition":{"type:":"WINDOWING"}},{"name:":"windowingtablefunction","order by:":"_col5","partition by:":"_col2"}] Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_9] + Select Operator [SEL_8] | outputColumnNames:["_col1","_col2","_col5"] | Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 4 [SIMPLE_EDGE] - Reduce Output Operator [RS_8] + Reduce Output Operator [RS_7] key expressions:p_mfgr (type: string), p_size (type: int) Map-reduce partition columns:p_mfgr (type: string) sort order:++ Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE value expressions:p_name (type: string) - TableScan [TS_7] + TableScan [TS_6] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE @@ -3742,96 +3742,96 @@ Stage-0 limit:-1 Stage-1 Reducer 4 - File Output Operator [FS_28] + File Output Operator [FS_27] compressed:false Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_27] + Select Operator [SEL_26] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE |<-Reducer 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_26] + Reduce Output Operator [RS_25] key expressions:_col0 (type: string) sort order:+ Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE value expressions:_col1 (type: string) - Select Operator [SEL_25] + Select Operator [SEL_24] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE - Filter Operator [FIL_31] + Filter Operator [FIL_30] predicate:_col3 is null (type: boolean) Statistics:Num rows: 302 Data size: 53756 Basic stats: COMPLETE Column stats: NONE - Merge Join Operator [MERGEJOIN_36] + Merge Join Operator [MERGEJOIN_35] | condition map:[{"":"Left Outer Join0 to 1"}] | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"} | outputColumnNames:["_col0","_col1","_col3"] | Statistics:Num rows: 605 Data size: 107690 Basic stats: COMPLETE Column stats: NONE |<-Map 7 [SIMPLE_EDGE] - | Reduce Output Operator [RS_22] + | Reduce Output Operator [RS_21] | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ | Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - | Select Operator [SEL_16] + | Select Operator [SEL_15] | outputColumnNames:["_col0"] | Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_34] + | Filter Operator [FIL_33] | predicate:(key > '2') (type: boolean) | Statistics:Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_14] + | TableScan [TS_13] | alias:src_cbo | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 2 [SIMPLE_EDGE] - Reduce Output Operator [RS_21] + Reduce Output Operator [RS_20] key expressions:_col0 (type: string) Map-reduce partition columns:_col0 (type: string) sort order:+ Statistics:Num rows: 550 Data size: 97900 Basic stats: COMPLETE Column stats: NONE value expressions:_col1 (type: string) - Merge Join Operator [MERGEJOIN_35] + Merge 
Join Operator [MERGEJOIN_34] | condition map:[{"":"Inner Join 0 to 1"}] | keys:{} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 550 Data size: 97900 Basic stats: COMPLETE Column stats: NONE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_18] + | Reduce Output Operator [RS_17] | sort order: | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string), _col1 (type: string) - | Select Operator [SEL_2] + | Select Operator [SEL_1] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:src_cbo | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 6 [SIMPLE_EDGE] - Reduce Output Operator [RS_19] + Reduce Output Operator [RS_18] sort order: Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator [SEL_11] + Select Operator [SEL_10] Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Filter Operator [FIL_32] + Filter Operator [FIL_31] predicate:(_col0 = 0) (type: boolean) Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_10] + Group By Operator [GBY_9] | aggregations:["count(VALUE._col0)"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 5 [SIMPLE_EDGE] - Reduce Output Operator [RS_9] + Reduce Output Operator [RS_8] sort order: Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions:_col0 (type: bigint) - Group By Operator [GBY_8] + Group By Operator [GBY_7] aggregations:["count()"] outputColumnNames:["_col0"] Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_5] + Select Operator [SEL_4] Statistics:Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_33] + Filter Operator [FIL_32] predicate:((key > '2') and key is null) (type: boolean) Statistics:Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_3] + TableScan [TS_2] alias:src_cbo Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE @@ -3863,87 +3863,87 @@ Stage-0 limit:-1 Stage-1 Reducer 3 - File Output Operator [FS_26] + File Output Operator [FS_25] compressed:false Statistics:Num rows: 15 Data size: 3507 Basic stats: COMPLETE Column stats: NONE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_25] + Select Operator [SEL_24] outputColumnNames:["_col0","_col1","_col2"] Statistics:Num rows: 15 Data size: 3507 Basic stats: COMPLETE Column stats: NONE - Filter Operator [FIL_29] + Filter Operator [FIL_28] predicate:_col4 is null (type: boolean) Statistics:Num rows: 15 Data size: 3507 Basic stats: COMPLETE Column stats: NONE - Merge Join Operator [MERGEJOIN_34] + Merge Join Operator [MERGEJOIN_33] | condition map:[{"":"Left Outer Join0 to 1"}] | keys:{"0":"_col0 (type: string), _col1 (type: string)","1":"_col0 (type: string), _col1 (type: string)"} | outputColumnNames:["_col0","_col1","_col2","_col4"] | Statistics:Num rows: 30 Data size: 7014 Basic stats: COMPLETE Column stats: NONE |<-Map 6 [SIMPLE_EDGE] - | Reduce Output Operator [RS_22] + | 
Reduce Output Operator [RS_21] | key expressions:_col0 (type: string), _col1 (type: string) | Map-reduce partition columns:_col0 (type: string), _col1 (type: string) | sort order:++ | Statistics:Num rows: 8 Data size: 1752 Basic stats: COMPLETE Column stats: COMPLETE - | Select Operator [SEL_16] + | Select Operator [SEL_15] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 8 Data size: 1752 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_32] + | Filter Operator [FIL_31] | predicate:(p_size < 10) (type: boolean) | Statistics:Num rows: 8 Data size: 1784 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_14] + | TableScan [TS_13] | alias:b | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 2 [SIMPLE_EDGE] - Reduce Output Operator [RS_21] + Reduce Output Operator [RS_20] key expressions:_col0 (type: string), _col1 (type: string) Map-reduce partition columns:_col0 (type: string), _col1 (type: string) sort order:++ Statistics:Num rows: 28 Data size: 6377 Basic stats: COMPLETE Column stats: NONE value expressions:_col2 (type: int) - Merge Join Operator [MERGEJOIN_33] + Merge Join Operator [MERGEJOIN_32] | condition map:[{"":"Inner Join 0 to 1"}] | keys:{} | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 28 Data size: 6377 Basic stats: COMPLETE Column stats: NONE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_18] + | Reduce Output Operator [RS_17] | sort order: | Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: int) - | Select Operator [SEL_2] + | Select Operator [SEL_1] | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:b | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 5 [SIMPLE_EDGE] - Reduce Output Operator [RS_19] + Reduce Output Operator [RS_18] sort order: Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator [SEL_11] + Select Operator [SEL_10] Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Filter Operator [FIL_30] + Filter Operator [FIL_29] predicate:(_col0 = 0) (type: boolean) Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_10] + Group By Operator [GBY_9] | aggregations:["count(VALUE._col0)"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 4 [SIMPLE_EDGE] - Reduce Output Operator [RS_9] + Reduce Output Operator [RS_8] sort order: Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions:_col0 (type: bigint) - Group By Operator [GBY_8] + Group By Operator [GBY_7] aggregations:["count()"] outputColumnNames:["_col0"] Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_5] + Select Operator [SEL_4] Statistics:Num rows: 1 Data size: 223 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_31] + Filter Operator [FIL_30] predicate:((p_size < 10) and (p_name is null or p_mfgr is null)) (type: boolean) Statistics:Num rows: 1 Data size: 223 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_3] + TableScan [TS_2] alias:b Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: 
COMPLETE @@ -3977,110 +3977,110 @@ Stage-0 limit:-1 Stage-1 Reducer 4 - File Output Operator [FS_38] + File Output Operator [FS_37] compressed:false Statistics:Num rows: 1 Data size: 146 Basic stats: COMPLETE Column stats: NONE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_37] + Select Operator [SEL_36] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 1 Data size: 146 Basic stats: COMPLETE Column stats: NONE |<-Reducer 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_36] + Reduce Output Operator [RS_35] key expressions:_col0 (type: string) sort order:+ Statistics:Num rows: 1 Data size: 146 Basic stats: COMPLETE Column stats: NONE value expressions:_col1 (type: int) - Merge Join Operator [MERGEJOIN_48] + Merge Join Operator [MERGEJOIN_47] | condition map:[{"":"Inner Join 0 to 1"}] | keys:{} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 1 Data size: 146 Basic stats: COMPLETE Column stats: NONE |<-Reducer 2 [SIMPLE_EDGE] - | Reduce Output Operator [RS_30] + | Reduce Output Operator [RS_29] | sort order: | Statistics:Num rows: 1 Data size: 133 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string), _col1 (type: int) - | Filter Operator [FIL_41] + | Filter Operator [FIL_40] | predicate:_col2 is null (type: boolean) | Statistics:Num rows: 1 Data size: 133 Basic stats: COMPLETE Column stats: COMPLETE - | Merge Join Operator [MERGEJOIN_47] + | Merge Join Operator [MERGEJOIN_46] | | condition map:[{"":"Left Outer Join0 to 1"}] | | keys:{"0":"UDFToDouble(_col1) (type: double)","1":"_col0 (type: double)"} | | outputColumnNames:["_col0","_col1","_col2"] | | Statistics:Num rows: 1 Data size: 133 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 1 [SIMPLE_EDGE] - | | Reduce Output Operator [RS_27] + | | Reduce Output Operator [RS_26] | | key expressions:UDFToDouble(_col1) (type: double) | | Map-reduce partition columns:UDFToDouble(_col1) (type: double) | | sort order:+ | | Statistics:Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE | | value expressions:_col0 (type: string), _col1 (type: int) - | | Select Operator [SEL_2] + | | Select Operator [SEL_1] | | outputColumnNames:["_col0","_col1"] | | Statistics:Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE | | TableScan [TS_0] | | alias:part | | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE | |<-Reducer 6 [SIMPLE_EDGE] - | Reduce Output Operator [RS_28] + | Reduce Output Operator [RS_27] | key expressions:_col0 (type: double) | Map-reduce partition columns:_col0 (type: double) | sort order:+ | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_8] + | Group By Operator [GBY_7] | | aggregations:["avg(VALUE._col0)"] | | outputColumnNames:["_col0"] | | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 5 [SIMPLE_EDGE] - | Reduce Output Operator [RS_7] + | Reduce Output Operator [RS_6] | sort order: | Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE | value expressions:_col0 (type: struct) - | Group By Operator [GBY_6] + | Group By Operator [GBY_5] | aggregations:["avg(p_size)"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - | Filter Operator 
[FIL_43] + | Filter Operator [FIL_42] | predicate:(p_size < 10) (type: boolean) | Statistics:Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_3] + | TableScan [TS_2] | alias:part | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 8 [SIMPLE_EDGE] - Reduce Output Operator [RS_31] + Reduce Output Operator [RS_30] sort order: Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator [SEL_23] + Select Operator [SEL_22] Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Filter Operator [FIL_44] + Filter Operator [FIL_43] predicate:(_col0 = 0) (type: boolean) Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_22] + Group By Operator [GBY_21] aggregations:["count()"] outputColumnNames:["_col0"] Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_16] + Select Operator [SEL_15] Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_45] + Filter Operator [FIL_44] predicate:_col0 is null (type: boolean) Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_15] + Group By Operator [GBY_14] | aggregations:["avg(VALUE._col0)"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 7 [SIMPLE_EDGE] - Reduce Output Operator [RS_14] + Reduce Output Operator [RS_13] sort order: Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE value expressions:_col0 (type: struct) - Group By Operator [GBY_13] + Group By Operator [GBY_12] aggregations:["avg(p_size)"] outputColumnNames:["_col0"] Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Filter Operator [FIL_46] + Filter Operator [FIL_45] predicate:(p_size < 10) (type: boolean) Statistics:Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_10] + TableScan [TS_9] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE @@ -4120,149 +4120,149 @@ Stage-0 limit:-1 Stage-1 Reducer 5 - File Output Operator [FS_40] + File Output Operator [FS_39] compressed:false Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_39] + Select Operator [SEL_38] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE |<-Reducer 4 [SIMPLE_EDGE] - Reduce Output Operator [RS_38] + Reduce Output Operator [RS_37] key expressions:_col0 (type: string) sort order:+ Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE value expressions:_col1 (type: double) - Select Operator [SEL_37] + Select Operator [SEL_36] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE - Filter Operator [FIL_43] + Filter Operator [FIL_42] predicate:_col3 is null (type: boolean) Statistics:Num rows: 2 Data size: 256 Basic stats: COMPLETE Column stats: NONE - Merge Join Operator [MERGEJOIN_49] + Merge Join Operator [MERGEJOIN_48] | condition map:[{"":"Left Outer Join0 to 
1"}] | keys:{"0":"_col0 (type: string), _col1 (type: double)","1":"_col0 (type: string), _col1 (type: double)"} | outputColumnNames:["_col0","_col1","_col3"] | Statistics:Num rows: 5 Data size: 641 Basic stats: COMPLETE Column stats: NONE |<-Reducer 10 [SIMPLE_EDGE] - | Reduce Output Operator [RS_34] + | Reduce Output Operator [RS_33] | key expressions:_col0 (type: string), _col1 (type: double) | Map-reduce partition columns:_col0 (type: string), _col1 (type: double) | sort order:++ | Statistics:Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE - | Select Operator [SEL_28] + | Select Operator [SEL_27] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_46] + | Filter Operator [FIL_45] | predicate:((_col2 - _col1) > 600.0) (type: boolean) | Statistics:Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_26] + | Group By Operator [GBY_25] | | aggregations:["min(VALUE._col0)","max(VALUE._col1)"] | | keys:KEY._col0 (type: string) | | outputColumnNames:["_col0","_col1","_col2"] | | Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 9 [SIMPLE_EDGE] - | Reduce Output Operator [RS_25] + | Reduce Output Operator [RS_24] | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ | Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col1 (type: double), _col2 (type: double) - | Group By Operator [GBY_24] + | Group By Operator [GBY_23] | aggregations:["min(p_retailprice)","max(p_retailprice)"] | keys:p_mfgr (type: string) | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_22] + | TableScan [TS_21] | alias:b | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_33] + Reduce Output Operator [RS_32] key expressions:_col0 (type: string), _col1 (type: double) Map-reduce partition columns:_col0 (type: string), _col1 (type: double) sort order:++ Statistics:Num rows: 5 Data size: 583 Basic stats: COMPLETE Column stats: NONE - Merge Join Operator [MERGEJOIN_48] + Merge Join Operator [MERGEJOIN_47] | condition map:[{"":"Inner Join 0 to 1"}] | keys:{} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 5 Data size: 583 Basic stats: COMPLETE Column stats: NONE |<-Reducer 2 [SIMPLE_EDGE] - | Reduce Output Operator [RS_30] + | Reduce Output Operator [RS_29] | sort order: | Statistics:Num rows: 5 Data size: 530 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string), _col1 (type: double) - | Group By Operator [GBY_5] + | Group By Operator [GBY_4] | | aggregations:["min(VALUE._col0)"] | | keys:KEY._col0 (type: string) | | outputColumnNames:["_col0","_col1"] | | Statistics:Num rows: 5 Data size: 530 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_4] + | Reduce Output Operator [RS_3] | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ | Statistics:Num rows: 5 Data size: 530 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col1 (type: double) - | Group By Operator [GBY_3] + | Group By Operator [GBY_2] | aggregations:["min(p_retailprice)"] | keys:p_mfgr (type: string) | 
outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 5 Data size: 530 Basic stats: COMPLETE Column stats: COMPLETE - | Select Operator [SEL_2] + | Select Operator [SEL_1] | outputColumnNames:["p_mfgr","p_retailprice"] | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:b | Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 8 [SIMPLE_EDGE] - Reduce Output Operator [RS_31] + Reduce Output Operator [RS_30] sort order: Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator [SEL_19] + Select Operator [SEL_18] Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Filter Operator [FIL_44] + Filter Operator [FIL_43] predicate:(_col0 = 0) (type: boolean) Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_18] + Group By Operator [GBY_17] | aggregations:["count(VALUE._col0)"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 7 [SIMPLE_EDGE] - Reduce Output Operator [RS_17] + Reduce Output Operator [RS_16] sort order: Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions:_col0 (type: bigint) - Group By Operator [GBY_16] + Group By Operator [GBY_15] aggregations:["count()"] outputColumnNames:["_col0"] Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_13] + Select Operator [SEL_12] Statistics:Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_45] + Filter Operator [FIL_44] predicate:((_col0 is null or _col1 is null) and ((_col2 - _col1) > 600.0)) (type: boolean) Statistics:Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_11] + Group By Operator [GBY_10] | aggregations:["min(VALUE._col0)","max(VALUE._col1)"] | keys:KEY._col0 (type: string) | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 6 [SIMPLE_EDGE] - Reduce Output Operator [RS_10] + Reduce Output Operator [RS_9] key expressions:_col0 (type: string) Map-reduce partition columns:_col0 (type: string) sort order:+ Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE value expressions:_col1 (type: double), _col2 (type: double) - Group By Operator [GBY_9] + Group By Operator [GBY_8] aggregations:["min(p_retailprice)","max(p_retailprice)"] keys:p_mfgr (type: string) outputColumnNames:["_col0","_col1","_col2"] Statistics:Num rows: 5 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_8] + Select Operator [SEL_7] outputColumnNames:["p_mfgr","p_retailprice"] Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_7] + TableScan [TS_6] alias:b Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out index ff055ea..3f9039a 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out @@ -198,8 +198,8 @@ Stage-0 Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Merge Join Operator [MERGEJOIN_28] | condition 
map:[{"":"Inner Join 0 to 1"}] - | keys:{"0":"_col3 (type: string)","1":"_col0 (type: string)"} - | outputColumnNames:["_col0","_col3","_col6"] + | keys:{"0":"_col1 (type: string)","1":"_col0 (type: string)"} + | outputColumnNames:["_col0","_col1","_col4"] | Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE |<-Map 5 [SIMPLE_EDGE] | Reduce Output Operator [RS_15] @@ -219,15 +219,15 @@ Stage-0 | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE |<-Reducer 2 [SIMPLE_EDGE] Reduce Output Operator [RS_13] - key expressions:_col3 (type: string) - Map-reduce partition columns:_col3 (type: string) + key expressions:_col1 (type: string) + Map-reduce partition columns:_col1 (type: string) sort order:+ Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE value expressions:_col0 (type: string) Merge Join Operator [MERGEJOIN_27] | condition map:[{"":"Inner Join 0 to 1"}] | keys:{"0":"_col0 (type: string)","1":"_col1 (type: string)"} - | outputColumnNames:["_col0","_col3"] + | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE |<-Map 1 [SIMPLE_EDGE] | Reduce Output Operator [RS_8] @@ -367,53 +367,53 @@ Stage-0 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint) Group By Operator [GBY_61] - aggregations:["count(_col13)","count(_col21)","count(_col3)"] - keys:_col2 (type: string), _col12 (type: string), _col20 (type: string) + aggregations:["count(_col9)","count(_col15)","count(_col3)"] + keys:_col2 (type: string), _col8 (type: string), _col14 (type: string) outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"] Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Select Operator [SEL_60] - outputColumnNames:["_col2","_col12","_col20","_col13","_col21","_col3"] + outputColumnNames:["_col2","_col8","_col14","_col9","_col15","_col3"] Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Merge Join Operator [MERGEJOIN_110] | condition map:[{"":"Inner Join 0 to 1"}] - | keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col15 (type: string), _col17 (type: string)"} - | outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"] + | keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col10 (type: string), _col12 (type: string)"} + | outputColumnNames:["_col2","_col3","_col8","_col9","_col14","_col15"] | Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE |<-Reducer 11 [SIMPLE_EDGE] | Reduce Output Operator [RS_58] - | key expressions:_col15 (type: string), _col17 (type: string) - | Map-reduce partition columns:_col15 (type: string), _col17 (type: string) + | key expressions:_col10 (type: string), _col12 (type: string) + | Map-reduce partition columns:_col10 (type: string), _col12 (type: string) | sort order:++ | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE - | value expressions:_col6 (type: string), _col7 (type: string), _col14 (type: string) + | value expressions:_col3 (type: string), _col4 (type: string), _col9 (type: string) | Select Operator [SEL_49] - | outputColumnNames:["_col14","_col15","_col17","_col6","_col7"] + | outputColumnNames:["_col10","_col12","_col3","_col4","_col9"] | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE | Merge 
Join Operator [MERGEJOIN_109] | | condition map:[{"":"Inner Join 0 to 1"}] - | | keys:{"0":"_col4 (type: string), _col6 (type: string)","1":"_col2 (type: string), _col4 (type: string)"} - | | outputColumnNames:["_col2","_col3","_col14","_col15","_col17"] + | | keys:{"0":"_col3 (type: string), _col5 (type: string)","1":"_col1 (type: string), _col3 (type: string)"} + | | outputColumnNames:["_col1","_col2","_col9","_col10","_col12"] | | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE | |<-Reducer 10 [SIMPLE_EDGE] | | Reduce Output Operator [RS_45] - | | key expressions:_col4 (type: string), _col6 (type: string) - | | Map-reduce partition columns:_col4 (type: string), _col6 (type: string) + | | key expressions:_col3 (type: string), _col5 (type: string) + | | Map-reduce partition columns:_col3 (type: string), _col5 (type: string) | | sort order:++ | | Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col2 (type: string), _col3 (type: string) + | | value expressions:_col1 (type: string), _col2 (type: string) | | Merge Join Operator [MERGEJOIN_107] | | | condition map:[{"":"Inner Join 0 to 1"}] - | | | keys:{"0":"_col3 (type: string)","1":"_col1 (type: string)"} - | | | outputColumnNames:["_col2","_col3","_col4","_col6"] + | | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"} + | | | outputColumnNames:["_col1","_col2","_col3","_col5"] | | | Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE | | |<-Map 14 [SIMPLE_EDGE] | | | Reduce Output Operator [RS_42] - | | | key expressions:_col1 (type: string) - | | | Map-reduce partition columns:_col1 (type: string) + | | | key expressions:_col0 (type: string) + | | | Map-reduce partition columns:_col0 (type: string) | | | sort order:+ | | | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE | | | Select Operator [SEL_16] - | | | outputColumnNames:["_col1"] + | | | outputColumnNames:["_col0"] | | | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE | | | Filter Operator [FIL_101] | | | predicate:((key = 'src1key') and value is not null) (type: boolean) @@ -423,15 +423,15 @@ Stage-0 | | | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE | | |<-Reducer 9 [SIMPLE_EDGE] | | Reduce Output Operator [RS_40] - | | key expressions:_col3 (type: string) - | | Map-reduce partition columns:_col3 (type: string) + | | key expressions:_col2 (type: string) + | | Map-reduce partition columns:_col2 (type: string) | | sort order:+ | | Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col2 (type: string), _col4 (type: string), _col6 (type: string) + | | value expressions:_col1 (type: string), _col3 (type: string), _col5 (type: string) | | Merge Join Operator [MERGEJOIN_106] | | | condition map:[{"":"Inner Join 0 to 1"}] - | | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"} - | | | outputColumnNames:["_col2","_col3","_col4","_col6"] + | | | keys:{"0":"_col1 (type: string)","1":"_col0 (type: string)"} + | | | outputColumnNames:["_col1","_col2","_col3","_col5"] | | | Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE | | |<-Map 13 [SIMPLE_EDGE] | | | Reduce Output Operator [RS_37] @@ -450,15 +450,15 @@ Stage-0 | | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE | | |<-Reducer 8 [SIMPLE_EDGE] | | Reduce Output Operator [RS_35] - 
| | key expressions:_col2 (type: string) - | | Map-reduce partition columns:_col2 (type: string) + | | key expressions:_col1 (type: string) + | | Map-reduce partition columns:_col1 (type: string) | | sort order:+ | | Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string) + | | value expressions:_col2 (type: string), _col3 (type: string), _col5 (type: string) | | Merge Join Operator [MERGEJOIN_105] | | | condition map:[{"":"Inner Join 0 to 1"}] - | | | keys:{"0":"_col1 (type: string)","1":"_col3 (type: string)"} - | | | outputColumnNames:["_col2","_col3","_col4","_col6"] + | | | keys:{"0":"_col0 (type: string)","1":"_col3 (type: string)"} + | | | outputColumnNames:["_col1","_col2","_col3","_col5"] | | | Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE | | |<-Map 12 [SIMPLE_EDGE] | | | Reduce Output Operator [RS_32] @@ -478,12 +478,12 @@ Stage-0 | | | Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE | | |<-Map 7 [SIMPLE_EDGE] | | Reduce Output Operator [RS_30] - | | key expressions:_col1 (type: string) - | | Map-reduce partition columns:_col1 (type: string) + | | key expressions:_col0 (type: string) + | | Map-reduce partition columns:_col0 (type: string) | | sort order:+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE | | Select Operator [SEL_7] - | | outputColumnNames:["_col1"] + | | outputColumnNames:["_col0"] | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE | | Filter Operator [FIL_98] | | predicate:((key = 'srcpartkey') and value is not null) (type: boolean) @@ -493,15 +493,15 @@ Stage-0 | | Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE | |<-Reducer 16 [SIMPLE_EDGE] | Reduce Output Operator [RS_47] - | key expressions:_col2 (type: string), _col4 (type: string) - | Map-reduce partition columns:_col2 (type: string), _col4 (type: string) + | key expressions:_col1 (type: string), _col3 (type: string) + | Map-reduce partition columns:_col1 (type: string), _col3 (type: string) | sort order:++ | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE - | value expressions:_col3 (type: string), _col5 (type: string) + | value expressions:_col2 (type: string), _col4 (type: string) | Merge Join Operator [MERGEJOIN_108] | | condition map:[{"":"Inner Join 0 to 1"}] | | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"} - | | outputColumnNames:["_col2","_col3","_col4","_col5"] + | | outputColumnNames:["_col1","_col2","_col3","_col4"] | | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE | |<-Map 15 [SIMPLE_EDGE] | | Reduce Output Operator [RS_24] @@ -509,9 +509,9 @@ Stage-0 | | Map-reduce partition columns:_col0 (type: string) | | sort order:+ | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string) + | | value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string) | | Select Operator [SEL_19] - | | outputColumnNames:["_col0","_col2","_col3","_col4","_col5"] + | | outputColumnNames:["_col0","_col1","_col2","_col3","_col4"] | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE | | Filter Operator [FIL_102] | | predicate:((((((v1 = 'srv1') and 
k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean) @@ -1406,8 +1406,8 @@ Stage-0 Map Join Operator [MAPJOIN_28] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 1":"_col3 (type: string)","Map 3":"_col0 (type: string)"} - | outputColumnNames:["_col0","_col3","_col6"] + | keys:{"Map 1":"_col1 (type: string)","Map 3":"_col0 (type: string)"} + | outputColumnNames:["_col0","_col1","_col4"] | Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE |<-Map 3 [BROADCAST_EDGE] | Reduce Output Operator [RS_15] @@ -1429,7 +1429,7 @@ Stage-0 | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true | keys:{"Map 1":"_col0 (type: string)","Map 2":"_col1 (type: string)"} - | outputColumnNames:["_col0","_col3"] + | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE |<-Map 2 [BROADCAST_EDGE] | Reduce Output Operator [RS_10] @@ -1559,18 +1559,18 @@ Stage-0 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint) Group By Operator [GBY_61] - aggregations:["count(_col13)","count(_col21)","count(_col3)"] - keys:_col2 (type: string), _col12 (type: string), _col20 (type: string) + aggregations:["count(_col9)","count(_col15)","count(_col3)"] + keys:_col2 (type: string), _col8 (type: string), _col14 (type: string) outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"] Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Select Operator [SEL_60] - outputColumnNames:["_col2","_col12","_col20","_col13","_col21","_col3"] + outputColumnNames:["_col2","_col8","_col14","_col9","_col15","_col3"] Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Map Join Operator [MAPJOIN_110] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 3":"_col15 (type: string), _col17 (type: string)"} - | outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"] + | keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 3":"_col10 (type: string), _col12 (type: string)"} + | outputColumnNames:["_col2","_col3","_col8","_col9","_col14","_col15"] | Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE |<-Map 2 [BROADCAST_EDGE] | Reduce Output Operator [RS_56] @@ -1611,26 +1611,26 @@ Stage-0 | alias:d1 | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE |<-Select Operator [SEL_49] - outputColumnNames:["_col14","_col15","_col17","_col6","_col7"] + outputColumnNames:["_col10","_col12","_col3","_col4","_col9"] Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE Map Join Operator [MAPJOIN_109] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 3":"_col4 (type: string), _col6 (type: string)","Map 10":"_col2 (type: string), _col4 (type: string)"} - | outputColumnNames:["_col2","_col3","_col14","_col15","_col17"] + | keys:{"Map 3":"_col3 (type: string), _col5 (type: string)","Map 10":"_col1 (type: string), _col3 (type: string)"} + | outputColumnNames:["_col1","_col2","_col9","_col10","_col12"] | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE |<-Map 10 [BROADCAST_EDGE] | Reduce Output Operator [RS_47] - | key 
expressions:_col2 (type: string), _col4 (type: string)
-   |  Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+   |  key expressions:_col1 (type: string), _col3 (type: string)
+   |  Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
    |  sort order:++
    |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-   |  value expressions:_col3 (type: string), _col5 (type: string)
+   |  value expressions:_col2 (type: string), _col4 (type: string)
    |  Map Join Operator [MAPJOIN_108]
    |  |  condition map:[{"":"Inner Join 0 to 1"}]
    |  |  HybridGraceHashJoin:true
    |  |  keys:{"Map 9":"_col0 (type: string)","Map 10":"_col0 (type: string)"}
-   |  |  outputColumnNames:["_col2","_col3","_col4","_col5"]
+   |  |  outputColumnNames:["_col1","_col2","_col3","_col4"]
    |  |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
    |  |<-Map 9 [BROADCAST_EDGE]
    |  |  Reduce Output Operator [RS_24]
@@ -1638,9 +1638,9 @@ Stage-0
    |  |  Map-reduce partition columns:_col0 (type: string)
    |  |  sort order:+
    |  |  Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-   |  |  value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+   |  |  value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string)
    |  |  Select Operator [SEL_19]
-   |  |  outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+   |  |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
    |  |  Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
    |  |  Filter Operator [FIL_102]
    |  |  predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
@@ -1660,17 +1660,17 @@ Stage-0
    |<-Map Join Operator [MAPJOIN_107]
    |  condition map:[{"":"Inner Join 0 to 1"}]
    |  HybridGraceHashJoin:true
-   |  keys:{"Map 3":"_col3 (type: string)","Map 8":"_col1 (type: string)"}
-   |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+   |  keys:{"Map 3":"_col2 (type: string)","Map 8":"_col0 (type: string)"}
+   |  outputColumnNames:["_col1","_col2","_col3","_col5"]
    |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
    |<-Map 8 [BROADCAST_EDGE]
    |  Reduce Output Operator [RS_42]
-   |  key expressions:_col1 (type: string)
-   |  Map-reduce partition columns:_col1 (type: string)
+   |  key expressions:_col0 (type: string)
+   |  Map-reduce partition columns:_col0 (type: string)
    |  sort order:+
    |  Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
    |  Select Operator [SEL_16]
-   |  outputColumnNames:["_col1"]
+   |  outputColumnNames:["_col0"]
    |  Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
    |  Filter Operator [FIL_101]
    |  predicate:((key = 'src1key') and value is not null) (type: boolean)
@@ -1681,8 +1681,8 @@ Stage-0
    |<-Map Join Operator [MAPJOIN_106]
    |  condition map:[{"":"Inner Join 0 to 1"}]
    |  HybridGraceHashJoin:true
-   |  keys:{"Map 3":"_col2 (type: string)","Map 7":"_col0 (type: string)"}
-   |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+   |  keys:{"Map 3":"_col1 (type: string)","Map 7":"_col0 (type: string)"}
+   |  outputColumnNames:["_col1","_col2","_col3","_col5"]
    |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
    |<-Map 7 [BROADCAST_EDGE]
    |  Reduce Output Operator [RS_37]
@@ -1702,8 +1702,8 @@ Stage-0
    |<-Map Join Operator [MAPJOIN_105]
    |  condition map:[{"":"Inner Join 0 to 1"}]
    |  HybridGraceHashJoin:true
-   |  keys:{"Map 3":"_col1 (type: string)","Map 6":"_col3 (type: string)"}
-   |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+   |  keys:{"Map 3":"_col0 (type: string)","Map 6":"_col3 (type: string)"}
+   |  outputColumnNames:["_col1","_col2","_col3","_col5"]
    |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
    |<-Map 6 [BROADCAST_EDGE]
    |  Reduce Output Operator [RS_32]
@@ -1722,7 +1722,7 @@ Stage-0
    |  alias:ss
    |  Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
    |<-Select Operator [SEL_7]
-        outputColumnNames:["_col1"]
+        outputColumnNames:["_col0"]
         Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Filter Operator [FIL_98]
         predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out
index 9a5d047..5bc04d7 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out
@@ -118,7 +118,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -268,7 +268,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -419,7 +419,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
diff --git a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
index d2937a5..9512bde 100644
--- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
@@ -205,8 +205,8 @@ STAGE PLANS:
                 condition map:
                      Left Semi Join 0 to 1
                 keys:
-                  0 _col0 (type: int)
-                  1 _col0 (type: int)
+                  0 _col0 (type: int), 1 (type: int)
+                  1 _col0 (type: int), _col1 (type: int)
                 outputColumnNames: _col1, _col2
                 input vertices:
                   1 Map 2
@@ -224,21 +224,21 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean)
+                    predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean)
                     Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: l_orderkey (type: int)
-                      outputColumnNames: _col0
+                      expressions: l_orderkey (type: int), l_linenumber (type: int)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int)
+                        keys: _col0 (type: int), _col1 (type: int)
                         mode: hash
-                        outputColumnNames: _col0
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
+                          key expressions: _col0 (type: int), _col1 (type: int)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                           Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
         Map 3
             Map Operator Tree:
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index da2033b..4f450ee 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -1778,7 +1778,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -1802,42 +1801,42 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (ds = '2008-04-08') (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: ds is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      sort order:
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: ds
+                  Group By Operator
+                    keys: '2008-04-08' (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: ds (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0
-                  1
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 _col0 (type: string)
+                  1 '2008-04-08' (type: string)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1873,8 +1872,25 @@ STAGE PLANS:
                   Select Operator
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      sort order:
+                      key expressions: '2008-04-08' (type: string)
+                      sort order: +
+                      Map-reduce partition columns: '2008-04-08' (type: string)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: '2008-04-08' (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Dynamic Partitioning Event Operator
+                        Target Input: srcpart
+                        Partition key expr: ds
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        Target column: ds
+                        Target Vertex: Map 1

   Stage: Stage-0
     Fetch Operator
@@ -1882,18 +1898,21 @@
       Processor Tree:
         ListSink

-Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -4148,7 +4167,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4171,33 +4189,33 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (ds = '2008-04-08') (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: ds is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      sort order:
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 2
            Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: ds
+                  Group By Operator
+                    keys: '2008-04-08' (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: ds (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Reducer 3
             Execution mode: vectorized
             Reduce Operator Tree:
@@ -4212,11 +4230,12 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0
-                  1
+                  0 _col0 (type: string)
+                  1 '2008-04-08' (type: string)
                 input vertices:
                   0 Map 1
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+                HybridGraceHashJoin: true
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -4248,18 +4267,21 @@
       Processor Tree:
        ListSink

-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
diff --git a/ql/src/test/results/clientpositive/udf1.q.out b/ql/src/test/results/clientpositive/udf1.q.out
index dffbccf..b3b694b 100644
--- a/ql/src/test/results/clientpositive/udf1.q.out
+++ b/ql/src/test/results/clientpositive/udf1.q.out
@@ -137,26 +137,26 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c10 EXPRESSION []
-POSTHOOK: Lineage: dest1.c11 EXPRESSION []
-POSTHOOK: Lineage: dest1.c12 EXPRESSION []
-POSTHOOK: Lineage: dest1.c13 EXPRESSION []
-POSTHOOK: Lineage: dest1.c14 EXPRESSION []
-POSTHOOK: Lineage: dest1.c15 EXPRESSION []
-POSTHOOK: Lineage: dest1.c16 EXPRESSION []
-POSTHOOK: Lineage: dest1.c17 EXPRESSION []
-POSTHOOK: Lineage: dest1.c18 EXPRESSION []
-POSTHOOK: Lineage: dest1.c19 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION []
-POSTHOOK: Lineage: dest1.c20 EXPRESSION []
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 EXPRESSION []
-POSTHOOK: Lineage: dest1.c5 EXPRESSION []
-POSTHOOK: Lineage: dest1.c6 EXPRESSION []
-POSTHOOK: Lineage: dest1.c7 EXPRESSION []
-POSTHOOK: Lineage: dest1.c8 EXPRESSION []
-POSTHOOK: Lineage: dest1.c9 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Lineage: dest1.c10 SIMPLE []
+POSTHOOK: Lineage: dest1.c11 SIMPLE []
+POSTHOOK: Lineage: dest1.c12 SIMPLE []
+POSTHOOK: Lineage: dest1.c13 SIMPLE []
+POSTHOOK: Lineage: dest1.c14 SIMPLE []
+POSTHOOK: Lineage: dest1.c15 SIMPLE []
+POSTHOOK: Lineage: dest1.c16 SIMPLE []
+POSTHOOK: Lineage: dest1.c17 SIMPLE []
+POSTHOOK: Lineage: dest1.c18 SIMPLE []
+POSTHOOK: Lineage: dest1.c19 SIMPLE []
+POSTHOOK: Lineage: dest1.c2 SIMPLE []
+POSTHOOK: Lineage: dest1.c20 SIMPLE []
+POSTHOOK: Lineage: dest1.c3 SIMPLE []
+POSTHOOK: Lineage: dest1.c4 SIMPLE []
+POSTHOOK: Lineage: dest1.c5 SIMPLE []
+POSTHOOK: Lineage: dest1.c6 SIMPLE []
+POSTHOOK: Lineage: dest1.c7 SIMPLE []
+POSTHOOK: Lineage: dest1.c8 SIMPLE []
+POSTHOOK: Lineage: dest1.c9 SIMPLE []
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
diff --git a/ql/src/test/results/clientpositive/udf_10_trims.q.out b/ql/src/test/results/clientpositive/udf_10_trims.q.out
index 2f79723..3a5303a 100644
--- a/ql/src/test/results/clientpositive/udf_10_trims.q.out
+++ b/ql/src/test/results/clientpositive/udf_10_trims.q.out
@@ -117,4 +117,4 @@ WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
diff --git a/ql/src/test/results/clientpositive/udf_concat_insert2.q.out b/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
index f1b70fe..d68bd76 100644
--- a/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
+++ b/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
@@ -16,7 +16,7 @@ INSERT OVERWRITE TABLE dest1 SELECT concat('1234', 'abc', 'extra argument'), src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION []
+POSTHOOK: Lineage: dest1.key SIMPLE []
 POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/union_date_trim.q.out b/ql/src/test/results/clientpositive/union_date_trim.q.out
index e2f5269..324e8b7 100644
--- a/ql/src/test/results/clientpositive/union_date_trim.q.out
+++ b/ql/src/test/results/clientpositive/union_date_trim.q.out
@@ -51,4 +51,4 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testdate
 POSTHOOK: Output: default@testdate
 POSTHOOK: Lineage: testdate.dt EXPRESSION [(testdate)testdate.FieldSchema(name:dt, type:date, comment:null), ]
-POSTHOOK: Lineage: testdate.id EXPRESSION [(testdate)testdate.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: testdate.id EXPRESSION []
diff --git a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
index 1562087..b3fc7d3 100644
--- a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
+++ b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
@@ -527,7 +527,7 @@ STAGE PLANS:
               predicate: ((if(true, f1, f2) = 1) and (f1 = 1)) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: 1 (type: int)
+                expressions: if(true, 1, f2) (type: int)
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Union
@@ -550,7 +550,7 @@ STAGE PLANS:
               predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Select Operator
-                expressions: 1 (type: int)
+                expressions: 0 (type: int)
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Union
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
index 25e5cfa..ec6226e 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
@@ -106,7 +106,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -242,7 +242,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -379,7 +379,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
index ee74fbe..4c6481e 100644
--- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
@@ -457,30 +457,30 @@ STAGE PLANS:
   Stage: Stage-11
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_1:$hdt$_1:lineitem
+        $hdt$_1:lineitem
          Fetch Operator
            limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_1:$hdt$_1:lineitem
+        $hdt$_1:lineitem
          TableScan
            alias: lineitem
            Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-             predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean)
+             predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean)
              Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
              Select Operator
-               expressions: l_orderkey (type: int)
-               outputColumnNames: _col0
+               expressions: l_orderkey (type: int), l_linenumber (type: int)
+               outputColumnNames: _col0, _col1
                Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
-                 keys: _col0 (type: int)
+                 keys: _col0 (type: int), _col1 (type: int)
                   mode: hash
-                  outputColumnNames: _col0
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                   HashTable Sink Operator
                     keys:
-                      0 _col0 (type: int)
-                      1 _col0 (type: int)
+                      0 _col0 (type: int), 1 (type: int)
+                      1 _col0 (type: int), _col1 (type: int)

   Stage: Stage-8
     Map Reduce
@@ -499,8 +499,8 @@ STAGE PLANS:
               condition map:
                    Left Semi Join 0 to 1
               keys:
-                0 _col0 (type: int)
-                1 _col0 (type: int)
+                0 _col0 (type: int), 1 (type: int)
+                1 _col0 (type: int), _col1 (type: int)
              outputColumnNames: _col1, _col2
              Statistics: Num rows: 14 Data size: 1714 Basic stats: COMPLETE Column stats: NONE
              File Output Operator