diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java
new file mode 100644
index 0000000..bbf518d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagate;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.RexNodeConverter;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+
+public class HiveRexExecutorImpl implements RelOptPlanner.Executor {
+
+  private final RelOptCluster cluster;
+
+  public HiveRexExecutorImpl(RelOptCluster cluster) {
+    this.cluster = cluster;
+  }
+
+  @Override
+  public void reduce(RexBuilder rexBuilder, List<RexNode> constExps, List<RexNode> reducedValues) {
+    RexNodeConverter rexNodeConverter = new RexNodeConverter(cluster);
+    for (RexNode rexNode : constExps) {
+      // initialize the converter
+      ExprNodeConverter converter = new ExprNodeConverter("", null, null, null,
+          new HashSet<Integer>(), cluster.getTypeFactory());
+      // convert the RexNode to an ExprNodeDesc
+      ExprNodeDesc expr = rexNode.accept(converter);
+      if (expr instanceof ExprNodeGenericFuncDesc) {
+        // fold the expression to a constant
+        ExprNodeDesc constant = ConstantPropagateProcFactory
+            .foldExpr((ExprNodeGenericFuncDesc) expr);
+        if (constant != null) {
+          try {
+            // convert the constant back to a RexNode
+            reducedValues.add(rexNodeConverter.convert((ExprNodeConstantDesc) constant));
+          } catch (Exception e) {
+            e.printStackTrace();
+            reducedValues.add(rexNode);
+          }
+        } else {
+          reducedValues.add(rexNode);
+        }
+      } else {
+        reducedValues.add(rexNode);
+      }
+    }
+  }
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
new file mode 100644
index 0000000..9969859
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
@@ -0,0 +1,964 @@
+/*
+ * Licensed to the Apache
Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules; + +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelOptPredicateList; +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Calc; +import org.apache.calcite.rel.core.Filter; +import org.apache.calcite.rel.core.JoinInfo; +import org.apache.calcite.rel.core.Project; +import org.apache.calcite.rel.core.RelFactories; +import org.apache.calcite.rel.logical.LogicalCalc; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; +import org.apache.calcite.rex.RexCorrelVariable; +import org.apache.calcite.rex.RexDynamicParam; +import org.apache.calcite.rex.RexFieldAccess; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexLocalRef; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexOver; +import org.apache.calcite.rex.RexProgram; +import org.apache.calcite.rex.RexProgramBuilder; +import org.apache.calcite.rex.RexRangeRef; +import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.rex.RexUtil; +import org.apache.calcite.rex.RexVisitorImpl; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.fun.SqlRowOperator; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.tools.RelBuilderFactory; +import org.apache.calcite.util.Pair; +import org.apache.calcite.util.Stacks; +import org.apache.calcite.util.Util; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +/** + * Collection of planner rules that apply various simplifying transformations on + * RexNode trees. Currently, there are two transformations: + * + *
+ * <ul>
+ * <li>Constant reduction, which evaluates constant subtrees, replacing them
+ * with a corresponding RexLiteral
+ * <li>Removal of redundant casts, which occurs when the argument into the cast
+ * is the same as the type of the resulting cast expression
+ * </ul>
+ */
+public abstract class HiveReduceExpressionsRule extends RelOptRule {
+  //~ Static fields/initializers ---------------------------------------------
+
+  /**
+   * Regular expression that matches the description of all instances of this
+   * rule and {@link org.apache.calcite.rel.rules.ValuesReduceRule} also. Use
+   * it to prevent the planner from invoking these rules.
+   */
+  public static final Pattern EXCLUSION_PATTERN =
+      Pattern.compile("Reduce(Expressions|Values)Rule.*");
+
+  /**
+   * Singleton rule that reduces constants inside a
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter}.
+   */
+  public static final HiveReduceExpressionsRule FILTER_INSTANCE =
+      new FilterReduceExpressionsRule(HiveFilter.class, RelFactories.LOGICAL_BUILDER);
+
+  /**
+   * Singleton rule that reduces constants inside a
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject}.
+   */
+  public static final HiveReduceExpressionsRule PROJECT_INSTANCE =
+      new ProjectReduceExpressionsRule(HiveProject.class, RelFactories.LOGICAL_BUILDER);
+
+  /**
+   * Singleton rule that reduces constants inside a
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin}.
+   */
+  public static final HiveReduceExpressionsRule JOIN_INSTANCE =
+      new JoinReduceExpressionsRule(HiveJoin.class, RelFactories.LOGICAL_BUILDER);
+
+  /**
+   * Singleton rule that reduces constants inside a
+   * {@link org.apache.calcite.rel.logical.LogicalCalc}.
+   */
+  public static final HiveReduceExpressionsRule CALC_INSTANCE =
+      new CalcReduceExpressionsRule(LogicalCalc.class, RelFactories.LOGICAL_BUILDER);
+
+  /**
+   * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Filter}.
+   * If the condition is a constant, the filter is removed (if TRUE) or replaced with
+   * an empty {@link org.apache.calcite.rel.core.Values} (if FALSE or NULL).
+   */
+  public static class FilterReduceExpressionsRule extends HiveReduceExpressionsRule {
+
+    public FilterReduceExpressionsRule(Class<? extends Filter> filterClass,
+        RelBuilderFactory relBuilderFactory) {
+      super(filterClass, relBuilderFactory, "HiveReduceExpressionsRule(Filter)");
+    }
+
+    @Override public void onMatch(RelOptRuleCall call) {
+      final Filter filter = call.rel(0);
+      final List<RexNode> expList =
+          Lists.newArrayList(filter.getCondition());
+      RexNode newConditionExp;
+      boolean reduced;
+      final RelOptPredicateList predicates =
+          RelMetadataQuery.getPulledUpPredicates(filter.getInput());
+      if (reduceExpressions(filter, expList, predicates)) {
+        assert expList.size() == 1;
+        newConditionExp = expList.get(0);
+        reduced = true;
+      } else {
+        // No reduction, but let's still test the original
+        // predicate to see if it was already a constant,
+        // in which case we don't need any runtime decision
+        // about filtering.
+        newConditionExp = filter.getCondition();
+        reduced = false;
+      }
+      if (newConditionExp.isAlwaysTrue()) {
+        call.transformTo(
+            filter.getInput());
+      }
+      // TODO: support LogicalValues
+      // else if (newConditionExp instanceof RexLiteral
+      //     || RexUtil.isNullLiteral(newConditionExp, true)) {
+      //   call.transformTo(call.builder().values(filter.getRowType()).build());
+      // }
+      else if (reduced) {
+        call.transformTo(call.builder().
+            push(filter.getInput()).filter(expList.get(0)).build());
+      } else {
+        if (newConditionExp instanceof RexCall) {
+          RexCall rexCall = (RexCall) newConditionExp;
+          boolean reverse =
+              rexCall.getOperator()
+                  == SqlStdOperatorTable.NOT;
+          if (reverse) {
+            rexCall = (RexCall) rexCall.getOperands().get(0);
+          }
+          reduceNotNullableFilter(call, filter, rexCall, reverse);
+        }
+        return;
+      }
+
+      // New plan is absolutely better than old plan.
+      call.getPlanner().setImportance(filter, 0.0);
+    }
+
+    private void reduceNotNullableFilter(
+        RelOptRuleCall call,
+        Filter filter,
+        RexCall rexCall,
+        boolean reverse) {
+      // If the expression is a IS [NOT] NULL on a non-nullable
+      // column, then we can either remove the filter or replace
+      // it with an Empty.
+      boolean alwaysTrue;
+      switch (rexCall.getKind()) {
+      case IS_NULL:
+      case IS_UNKNOWN:
+        alwaysTrue = false;
+        break;
+      case IS_NOT_NULL:
+        alwaysTrue = true;
+        break;
+      default:
+        return;
+      }
+      if (reverse) {
+        alwaysTrue = !alwaysTrue;
+      }
+      RexNode operand = rexCall.getOperands().get(0);
+      if (operand instanceof RexInputRef) {
+        RexInputRef inputRef = (RexInputRef) operand;
+        if (!inputRef.getType().isNullable()) {
+          if (alwaysTrue) {
+            call.transformTo(filter.getInput());
+          } else {
+            call.transformTo(call.builder().values(filter.getRowType()).build());
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Project}.
+   */
+  public static class ProjectReduceExpressionsRule extends HiveReduceExpressionsRule {
+
+    public ProjectReduceExpressionsRule(Class<? extends Project> projectClass,
+        RelBuilderFactory relBuilderFactory) {
+      super(projectClass, relBuilderFactory, "HiveReduceExpressionsRule(Project)");
+    }
+
+    @Override public void onMatch(RelOptRuleCall call) {
+      Project project = call.rel(0);
+      final RelOptPredicateList predicates =
+          RelMetadataQuery.getPulledUpPredicates(project.getInput());
+      final List<RexNode> expList =
+          Lists.newArrayList(project.getProjects());
+      if (reduceExpressions(project, expList, predicates)) {
+        call.transformTo(
+            call.builder()
+                .push(project.getInput())
+                .project(expList, project.getRowType().getFieldNames())
+                .build());
+
+        // New plan is absolutely better than old plan.
+        call.getPlanner().setImportance(project, 0.0);
+      }
+    }
+  }
+
+  /**
+   * Rule that reduces constants inside a
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin}.
+   */
+  public static class JoinReduceExpressionsRule extends HiveReduceExpressionsRule {
+
+    public JoinReduceExpressionsRule(Class<? extends HiveJoin> joinClass,
+        RelBuilderFactory relBuilderFactory) {
+      super(joinClass, relBuilderFactory, "HiveReduceExpressionsRule(HiveJoin)");
+    }
+
+    @Override public void onMatch(RelOptRuleCall call) {
+      final HiveJoin join = call.rel(0);
+      final List<RexNode> expList = Lists.newArrayList(join.getCondition());
+      final int fieldCount = join.getLeft().getRowType().getFieldCount();
+      final RelOptPredicateList leftPredicates =
+          RelMetadataQuery.getPulledUpPredicates(join.getLeft());
+      final RelOptPredicateList rightPredicates =
+          RelMetadataQuery.getPulledUpPredicates(join.getRight());
+      final RelOptPredicateList predicates =
+          leftPredicates.union(rightPredicates.shift(fieldCount));
+      if (!reduceExpressions(join, expList, predicates)) {
+        return;
+      }
+      final JoinInfo joinInfo = JoinInfo.of(join.getLeft(), join.getRight(), expList.get(0));
+      if (!joinInfo.isEqui()) {
+        // This kind of join must be an equi-join, and the condition is
+        // no longer an equi-join. SemiJoin is an example of this.
+        return;
+      }
+      call.transformTo(
+          join.copy(
+              join.getTraitSet(),
+              expList.get(0),
+              join.getLeft(),
+              join.getRight(),
+              join.getJoinType(),
+              join.isSemiJoinDone()));
+
+      // New plan is absolutely better than old plan.
+      call.getPlanner().setImportance(join, 0.0);
+    }
+  }
+
+  /**
+   * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Calc}.
+ */ + public static class CalcReduceExpressionsRule extends HiveReduceExpressionsRule { + + public CalcReduceExpressionsRule(Class calcClass, + RelBuilderFactory relBuilderFactory) { + super(calcClass, relBuilderFactory, "HiveReduceExpressionsRule(Calc)"); + } + + @Override public void onMatch(RelOptRuleCall call) { + Calc calc = call.rel(0); + RexProgram program = calc.getProgram(); + final List exprList = program.getExprList(); + + // Form a list of expressions with sub-expressions fully expanded. + final List expandedExprList = Lists.newArrayList(); + final RexShuttle shuttle = + new RexShuttle() { + public RexNode visitLocalRef(RexLocalRef localRef) { + return expandedExprList.get(localRef.getIndex()); + } + }; + for (RexNode expr : exprList) { + expandedExprList.add(expr.accept(shuttle)); + } + final RelOptPredicateList predicates = RelOptPredicateList.EMPTY; + if (reduceExpressions(calc, expandedExprList, predicates)) { + final RexProgramBuilder builder = + new RexProgramBuilder( + calc.getInput().getRowType(), + calc.getCluster().getRexBuilder()); + final List list = Lists.newArrayList(); + for (RexNode expr : expandedExprList) { + list.add(builder.registerInput(expr)); + } + if (program.getCondition() != null) { + final int conditionIndex = + program.getCondition().getIndex(); + final RexNode newConditionExp = + expandedExprList.get(conditionIndex); + if (newConditionExp.isAlwaysTrue()) { + // condition is always TRUE - drop it + } else if (newConditionExp instanceof RexLiteral + || RexUtil.isNullLiteral(newConditionExp, true)) { + // condition is always NULL or FALSE - replace calc + // with empty + call.transformTo(call.builder().values(calc.getRowType()).build()); + return; + } else { + builder.addCondition(list.get(conditionIndex)); + } + } + int k = 0; + for (RexLocalRef projectExpr : program.getProjectList()) { + final int index = projectExpr.getIndex(); + builder.addProject( + list.get(index).getIndex(), + program.getOutputRowType().getFieldNames().get(k++)); + } + call.transformTo( + calc.copy(calc.getTraitSet(), calc.getInput(), builder.getProgram())); + + // New plan is absolutely better than old plan. + call.getPlanner().setImportance(calc, 0.0); + } + } + } + + //~ Constructors ----------------------------------------------------------- + + /** + * Creates a HiveReduceExpressionsRule. + * + * @param clazz class of rels to which this rule should apply + */ + protected HiveReduceExpressionsRule(Class clazz, + RelBuilderFactory relBuilderFactory, String desc) { + super(operand(clazz, any()), relBuilderFactory, desc); + } + + //~ Methods ---------------------------------------------------------------- + + /** + * Reduces a list of expressions. + * + * @param rel Relational expression + * @param expList List of expressions, modified in place + * @param predicates Constraints known to hold on input expressions + * @return whether reduction found something to change, and succeeded + */ + protected static boolean reduceExpressions(RelNode rel, List expList, + RelOptPredicateList predicates) { + RexBuilder rexBuilder = rel.getCluster().getRexBuilder(); + + // Replace predicates on CASE to CASE on predicates. + new CaseShuttle().mutate(expList); + + // Find reducible expressions. 
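Before the list bookkeeping that follows, it may help to see the executor contract in isolation: a constant RexNode subtree goes in, and a RexLiteral comes out. The hypothetical standalone snippet below uses Calcite's stock RexExecutorImpl, following the pattern suggested by a comment later in this method; the HiveRexExecutorImpl added by this patch needs a live RelOptCluster, so it is less convenient for a demo. Package locations assume the Calcite version this patch targets.

```java
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexExecutorImpl;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.schema.Schemas;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

public class ExecutorDemo {
  public static void main(String[] args) {
    RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    // Constant subtree: 1 + 2
    RexNode onePlusTwo = rexBuilder.makeCall(SqlStdOperatorTable.PLUS,
        rexBuilder.makeExactLiteral(BigDecimal.ONE),
        rexBuilder.makeExactLiteral(BigDecimal.valueOf(2)));
    List<RexNode> reduced = new ArrayList<>();
    // Same reduce() contract that reduceExpressions() relies on below.
    new RexExecutorImpl(Schemas.createDataContext(null))
        .reduce(rexBuilder, Arrays.asList(onePlusTwo), reduced);
    System.out.println(reduced.get(0)); // a RexLiteral: 3
  }
}
```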
+ final List constExps = Lists.newArrayList(); + List addCasts = Lists.newArrayList(); + final List removableCasts = Lists.newArrayList(); + final ImmutableMap constants = + predicateConstants(predicates); + findReducibleExps(rel.getCluster().getTypeFactory(), expList, constants, + constExps, addCasts, removableCasts); + if (constExps.isEmpty() && removableCasts.isEmpty()) { + return false; + } + + // Remove redundant casts before reducing constant expressions. + // If the argument to the redundant cast is a reducible constant, + // reducing that argument to a constant first will result in not being + // able to locate the original cast expression. + if (!removableCasts.isEmpty()) { + final List reducedExprs = Lists.newArrayList(); + for (RexNode exp : removableCasts) { + RexCall call = (RexCall) exp; + reducedExprs.add(call.getOperands().get(0)); + } + RexReplacer replacer = + new RexReplacer( + rexBuilder, + removableCasts, + reducedExprs, + Collections.nCopies(removableCasts.size(), false)); + replacer.mutate(expList); + } + + if (constExps.isEmpty()) { + return true; + } + + final List constExps2 = Lists.newArrayList(constExps); + if (!constants.isEmpty()) { + //noinspection unchecked + final List> pairs = + (List>) (List) + Lists.newArrayList(constants.entrySet()); + RexReplacer replacer = + new RexReplacer( + rexBuilder, + Pair.left(pairs), + Pair.right(pairs), + Collections.nCopies(pairs.size(), false)); + replacer.mutate(constExps2); + } + + // Compute the values they reduce to. + RelOptPlanner.Executor executor = + rel.getCluster().getPlanner().getExecutor(); + if (executor == null) { + // Cannot reduce expressions: caller has not set an executor in their + // environment. Caller should execute something like the following before + // invoking the planner: + // + // final RexExecutorImpl executor = + // new RexExecutorImpl(Schemas.createDataContext(null)); + // rootRel.getCluster().getPlanner().setExecutor(executor); + return false; + } + + final List reducedValues = Lists.newArrayList(); + executor.reduce(rexBuilder, constExps2, reducedValues); + + // For Project, we have to be sure to preserve the result + // types, so always cast regardless of the expression type. + // For other RelNodes like Filter, in general, this isn't necessary, + // and the presence of casts could hinder other rules such as sarg + // analysis, which require bare literals. But there are special cases, + // like when the expression is a UDR argument, that need to be + // handled as special cases. + if (rel instanceof Project) { + addCasts = Collections.nCopies(reducedValues.size(), true); + } + + RexReplacer replacer = + new RexReplacer( + rexBuilder, + constExps, + reducedValues, + addCasts); + replacer.mutate(expList); + return true; + } + + /** + * Locates expressions that can be reduced to literals or converted to + * expressions with redundant casts removed. 
+ * + * @param typeFactory Type factory + * @param exps list of candidate expressions to be examined for + * reduction + * @param constants List of expressions known to be constant + * @param constExps returns the list of expressions that can be constant + * reduced + * @param addCasts indicator for each expression that can be constant + * reduced, whether a cast of the resulting reduced + * expression is potentially necessary + * @param removableCasts returns the list of cast expressions where the cast + */ + protected static void findReducibleExps(RelDataTypeFactory typeFactory, + List exps, ImmutableMap constants, + List constExps, List addCasts, + List removableCasts) { + ReducibleExprLocator gardener = + new ReducibleExprLocator(typeFactory, constants, constExps, + addCasts, removableCasts); + for (RexNode exp : exps) { + gardener.analyze(exp); + } + assert constExps.size() == addCasts.size(); + } + + protected static ImmutableMap predicateConstants( + RelOptPredicateList predicates) { + // We cannot use an ImmutableMap.Builder here. If there are multiple entries + // with the same key (e.g. "WHERE deptno = 1 AND deptno = 2"), it doesn't + // matter which we take, so the latter will replace the former. + // The basic idea is to find all the pairs of RexNode = RexLiteral + // (1) If 'predicates' contain a non-EQUALS, we bail out. + // (2) It is OK if a RexNode is equal to the same RexLiteral several times, + // (e.g. "WHERE deptno = 1 AND deptno = 1") + // (3) It will return false if there are inconsistent constraints (e.g. + // "WHERE deptno = 1 AND deptno = 2") + final Map map = new HashMap<>(); + final Set excludeSet = new HashSet<>(); + for (RexNode predicate : predicates.pulledUpPredicates) { + gatherConstraints(map, predicate, excludeSet); + } + final ImmutableMap.Builder builder = + ImmutableMap.builder(); + for (Map.Entry entry : map.entrySet()) { + RexNode rexNode = entry.getKey(); + if (!overlap(rexNode, excludeSet)) { + builder.put(rexNode, entry.getValue()); + } + } + return builder.build(); + } + + private static boolean overlap(RexNode rexNode, Set set) { + if (rexNode instanceof RexCall) { + for (RexNode r : ((RexCall) rexNode).getOperands()) { + if (overlap(r, set)) { + return true; + } + } + return false; + } else { + return set.contains(rexNode); + } + } + + /** Tries to decompose the RexNode which is a RexCall into non-literal + * RexNodes. */ + private static void decompose(Set set, RexNode rexNode) { + if (rexNode instanceof RexCall) { + for (RexNode r : ((RexCall) rexNode).getOperands()) { + decompose(set, r); + } + } else if (!(rexNode instanceof RexLiteral)) { + set.add(rexNode); + } + } + + private static void gatherConstraints(Map map, + RexNode predicate, Set excludeSet) { + if (predicate.getKind() != SqlKind.EQUALS) { + decompose(excludeSet, predicate); + return; + } + final List operands = ((RexCall) predicate).getOperands(); + if (operands.size() != 2) { + decompose(excludeSet, predicate); + return; + } + // if it reaches here, we have rexNode equals rexNode + final RexNode left = operands.get(0); + final RexNode right = operands.get(1); + // note that literals are immutable too and they can only be compared through + // values. + if (right instanceof RexLiteral && !excludeSet.contains(left)) { + RexLiteral existedValue = map.get(left); + if (existedValue == null) { + map.put(left, (RexLiteral) right); + } else { + if (!existedValue.getValue().equals(((RexLiteral) right).getValue())) { + // we found conflict values. 
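To make the constraint-gathering contract above concrete, here is a small self-contained simulation of the map/excludeSet bookkeeping; plain strings and ints stand in for RexNode and RexLiteral, so this is illustrative only, not code from the patch.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PredicateConstantsDemo {
  public static void main(String[] args) {
    Map<String, Integer> map = new HashMap<>();
    Set<String> excludeSet = new HashSet<>();
    // "WHERE deptno = 1 AND deptno = 1" -> consistent; deptno stays mapped
    gather(map, excludeSet, "deptno", 1);
    gather(map, excludeSet, "deptno", 1);
    System.out.println(map);        // {deptno=1}
    // "WHERE deptno = 1 AND deptno = 2" -> conflict; deptno is excluded
    gather(map, excludeSet, "deptno", 2);
    System.out.println(map);        // {}
    System.out.println(excludeSet); // [deptno]
  }

  // Mirrors gatherConstraints for the "column = literal" case.
  static void gather(Map<String, Integer> map, Set<String> excludeSet,
      String column, int literal) {
    if (excludeSet.contains(column)) {
      return;
    }
    Integer existing = map.get(column);
    if (existing == null) {
      map.put(column, literal);
    } else if (!existing.equals(literal)) {
      // conflicting values found
      map.remove(column);
      excludeSet.add(column);
    }
  }
}
```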
+ map.remove(left); + excludeSet.add(left); + } + } + } else if (left instanceof RexLiteral && !excludeSet.contains(right)) { + RexLiteral existedValue = map.get(right); + if (existedValue == null) { + map.put(right, (RexLiteral) left); + } else { + if (!existedValue.getValue().equals(((RexLiteral) left).getValue())) { + map.remove(right); + excludeSet.add(right); + } + } + } + } + + /** Pushes predicates into a CASE. + * + *
* <p>
We have a loose definition of 'predicate': any boolean expression will + * do, except CASE. For example '(CASE ...) = 5' or '(CASE ...) IS NULL'. + */ + protected static RexCall pushPredicateIntoCase(RexCall call) { + if (call.getType().getSqlTypeName() != SqlTypeName.BOOLEAN) { + return call; + } + switch (call.getKind()) { + case CASE: + case AND: + case OR: + return call; // don't push CASE into CASE! + } + int caseOrdinal = -1; + final List operands = call.getOperands(); + for (int i = 0; i < operands.size(); i++) { + RexNode operand = operands.get(i); + switch (operand.getKind()) { + case CASE: + caseOrdinal = i; + } + } + if (caseOrdinal < 0) { + return call; + } + // Convert + // f(CASE WHEN p1 THEN v1 ... END, arg) + // to + // CASE WHEN p1 THEN f(v1, arg) ... END + final RexCall case_ = (RexCall) operands.get(caseOrdinal); + final List nodes = new ArrayList<>(); + for (int i = 0; i < case_.getOperands().size(); i++) { + RexNode node = case_.getOperands().get(i); + if (!RexUtil.isCasePredicate(case_, i)) { + node = substitute(call, caseOrdinal, node); + } + nodes.add(node); + } + return case_.clone(call.getType(), nodes); + } + + /** Converts op(arg0, ..., argOrdinal, ..., argN) to op(arg0,..., node, ..., argN). */ + protected static RexNode substitute(RexCall call, int ordinal, RexNode node) { + final List newOperands = Lists.newArrayList(call.getOperands()); + newOperands.set(ordinal, node); + return call.clone(call.getType(), newOperands); + } + + //~ Inner Classes ---------------------------------------------------------- + + /** + * Replaces expressions with their reductions. Note that we only have to + * look for RexCall, since nothing else is reducible in the first place. + */ + protected static class RexReplacer extends RexShuttle { + private final RexBuilder rexBuilder; + private final List reducibleExps; + private final List reducedValues; + private final List addCasts; + + RexReplacer( + RexBuilder rexBuilder, + List reducibleExps, + List reducedValues, + List addCasts) { + this.rexBuilder = rexBuilder; + this.reducibleExps = reducibleExps; + this.reducedValues = reducedValues; + this.addCasts = addCasts; + } + + @Override public RexNode visitInputRef(RexInputRef inputRef) { + RexNode node = visit(inputRef); + if (node == null) { + return super.visitInputRef(inputRef); + } + return node; + } + + @Override public RexNode visitCall(RexCall call) { + RexNode node = visit(call); + if (node != null) { + return node; + } + node = super.visitCall(call); + if (node != call) { + node = RexUtil.simplify(rexBuilder, node); + } + return node; + } + + private RexNode visit(final RexNode call) { + int i = reducibleExps.indexOf(call); + if (i == -1) { + return null; + } + RexNode replacement = reducedValues.get(i); + if (addCasts.get(i) + && (replacement.getType() != call.getType())) { + // Handle change from nullable to NOT NULL by claiming + // that the result is still nullable, even though + // we know it isn't. + // + // Also, we cannot reduce CAST('abc' AS VARCHAR(4)) to 'abc'. + // If we make 'abc' of type VARCHAR(4), we may later encounter + // the same expression in a Project's digest where it has + // type VARCHAR(3), and that's wrong. + replacement = rexBuilder.makeCast(call.getType(), replacement, true); + } + return replacement; + } + } + + /** + * Helper class used to locate expressions that either can be reduced to + * literals or contain redundant casts. 
+ */ + protected static class ReducibleExprLocator extends RexVisitorImpl { + /** Whether an expression is constant, and if so, whether it can be + * reduced to a simpler constant. */ + enum Constancy { + NON_CONSTANT, REDUCIBLE_CONSTANT, IRREDUCIBLE_CONSTANT + } + + private final RelDataTypeFactory typeFactory; + + private final List stack; + + private final ImmutableMap constants; + + private final List constExprs; + + private final List addCasts; + + private final List removableCasts; + + private final List parentCallTypeStack; + + ReducibleExprLocator(RelDataTypeFactory typeFactory, + ImmutableMap constants, List constExprs, + List addCasts, List removableCasts) { + // go deep + super(true); + this.typeFactory = typeFactory; + this.constants = constants; + this.constExprs = constExprs; + this.addCasts = addCasts; + this.removableCasts = removableCasts; + this.stack = Lists.newArrayList(); + this.parentCallTypeStack = Lists.newArrayList(); + } + + public void analyze(RexNode exp) { + assert stack.isEmpty(); + + exp.accept(this); + + // Deal with top of stack + assert stack.size() == 1; + assert parentCallTypeStack.isEmpty(); + Constancy rootConstancy = stack.get(0); + if (rootConstancy == Constancy.REDUCIBLE_CONSTANT) { + // The entire subtree was constant, so add it to the result. + addResult(exp); + } + stack.clear(); + } + + private Void pushVariable() { + stack.add(Constancy.NON_CONSTANT); + return null; + } + + private void addResult(RexNode exp) { + // Cast of literal can't be reduced, so skip those (otherwise we'd + // go into an infinite loop as we add them back). + if (exp.getKind() == SqlKind.CAST) { + RexCall cast = (RexCall) exp; + RexNode operand = cast.getOperands().get(0); + if (operand instanceof RexLiteral) { + return; + } + } + constExprs.add(exp); + + // In the case where the expression corresponds to a UDR argument, + // we need to preserve casts. Note that this only applies to + // the topmost argument, not expressions nested within the UDR + // call. + // + // REVIEW zfong 6/13/08 - Are there other expressions where we + // also need to preserve casts? 
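The cast-preservation concern discussed in the comments above can be seen directly with a RexBuilder: reducing CAST('abc' AS VARCHAR(4)) to the bare literal would change the expression's declared type, which is why addResult records whether a compensating cast is needed. A hypothetical snippet, not part of the patch:

```java
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.type.SqlTypeName;

public class CastTypeDemo {
  public static void main(String[] args) {
    RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    RexBuilder rexBuilder = new RexBuilder(typeFactory);
    RelDataType varchar4 = typeFactory.createSqlType(SqlTypeName.VARCHAR, 4);
    RexNode cast = rexBuilder.makeCast(varchar4, rexBuilder.makeLiteral("abc"));
    // The two types differ, so replacing the cast with the bare literal
    // would change the expression's digest.
    System.out.println(cast.getType());                          // VARCHAR(4)
    System.out.println(rexBuilder.makeLiteral("abc").getType()); // CHAR(3) NOT NULL
  }
}
```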
+ if (parentCallTypeStack.isEmpty()) { + addCasts.add(false); + } else { + addCasts.add(isUdf(Stacks.peek(parentCallTypeStack))); + } + } + + private Boolean isUdf(SqlOperator operator) { + // return operator instanceof UserDefinedRoutine + return false; + } + + public Void visitInputRef(RexInputRef inputRef) { + if (constants.containsKey(inputRef)) { + stack.add(Constancy.REDUCIBLE_CONSTANT); + return null; + } + return pushVariable(); + } + + public Void visitLiteral(RexLiteral literal) { + stack.add(Constancy.IRREDUCIBLE_CONSTANT); + return null; + } + + public Void visitOver(RexOver over) { + // assume non-constant (running SUM(1) looks constant but isn't) + analyzeCall(over, Constancy.NON_CONSTANT); + return null; + } + + public Void visitCorrelVariable(RexCorrelVariable correlVariable) { + return pushVariable(); + } + + public Void visitCall(RexCall call) { + // assume REDUCIBLE_CONSTANT until proven otherwise + analyzeCall(call, Constancy.REDUCIBLE_CONSTANT); + return null; + } + + private void analyzeCall(RexCall call, Constancy callConstancy) { + Stacks.push(parentCallTypeStack, call.getOperator()); + + // visit operands, pushing their states onto stack + super.visitCall(call); + + // look for NON_CONSTANT operands + int operandCount = call.getOperands().size(); + List operandStack = Util.last(stack, operandCount); + for (Constancy operandConstancy : operandStack) { + if (operandConstancy == Constancy.NON_CONSTANT) { + callConstancy = Constancy.NON_CONSTANT; + } + } + + // Even if all operands are constant, the call itself may + // be non-deterministic. + if (!call.getOperator().isDeterministic()) { + callConstancy = Constancy.NON_CONSTANT; + } else if (call.getOperator().isDynamicFunction()) { + // We can reduce the call to a constant, but we can't + // cache the plan if the function is dynamic. + // For now, treat it same as non-deterministic. + callConstancy = Constancy.NON_CONSTANT; + } + + // Row operator itself can't be reduced to a literal, but if + // the operands are constants, we still want to reduce those + if ((callConstancy == Constancy.REDUCIBLE_CONSTANT) + && (call.getOperator() instanceof SqlRowOperator)) { + callConstancy = Constancy.NON_CONSTANT; + } + + if (callConstancy == Constancy.NON_CONSTANT) { + // any REDUCIBLE_CONSTANT children are now known to be maximal + // reducible subtrees, so they can be added to the result + // list + for (int iOperand = 0; iOperand < operandCount; ++iOperand) { + Constancy constancy = operandStack.get(iOperand); + if (constancy == Constancy.REDUCIBLE_CONSTANT) { + addResult(call.getOperands().get(iOperand)); + } + } + + // if this cast expression can't be reduced to a literal, + // then see if we can remove the cast + if (call.getOperator() == SqlStdOperatorTable.CAST) { + reduceCasts(call); + } + } + + // pop operands off of the stack + operandStack.clear(); + + // pop this parent call operator off the stack + Stacks.pop(parentCallTypeStack, call.getOperator()); + + // push constancy result for this call onto stack + stack.add(callConstancy); + } + + private void reduceCasts(RexCall outerCast) { + List operands = outerCast.getOperands(); + if (operands.size() != 1) { + return; + } + RelDataType outerCastType = outerCast.getType(); + RelDataType operandType = operands.get(0).getType(); + if (operandType.equals(outerCastType)) { + removableCasts.add(outerCast); + return; + } + + // See if the reduction + // CAST((CAST x AS type) AS type NOT NULL) + // -> CAST(x AS type NOT NULL) + // applies. 
TODO jvs 15-Dec-2008: consider + // similar cases for precision changes. + if (!(operands.get(0) instanceof RexCall)) { + return; + } + RexCall innerCast = (RexCall) operands.get(0); + if (innerCast.getOperator() != SqlStdOperatorTable.CAST) { + return; + } + if (innerCast.getOperands().size() != 1) { + return; + } + RelDataType outerTypeNullable = + typeFactory.createTypeWithNullability(outerCastType, true); + RelDataType innerTypeNullable = + typeFactory.createTypeWithNullability(operandType, true); + if (outerTypeNullable != innerTypeNullable) { + return; + } + if (operandType.isNullable()) { + removableCasts.add(innerCast); + } + } + + public Void visitDynamicParam(RexDynamicParam dynamicParam) { + return pushVariable(); + } + + public Void visitRangeRef(RexRangeRef rangeRef) { + return pushVariable(); + } + + public Void visitFieldAccess(RexFieldAccess fieldAccess) { + return pushVariable(); + } + } + + /** Shuttle that pushes predicates into a CASE. */ + protected static class CaseShuttle extends RexShuttle { + @Override public RexNode visitCall(RexCall call) { + for (;;) { + call = (RexCall) super.visitCall(call); + final RexCall old = call; + call = pushPredicateIntoCase(call); + if (call == old) { + return call; + } + } + } + } +} + +// End HiveReduceExpressionsRule.java diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java index 1f5d919..24655ec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java @@ -149,9 +149,37 @@ static ASTNode literal(RexLiteral literal, boolean useTypeQualInLiteral) { switch (sqlType) { case BINARY: + case DATE: + case TIME: + case TIMESTAMP: + case INTERVAL_YEAR_MONTH: + case INTERVAL_DAY_TIME: + if (literal.getValue() == null) { + return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node(); + } + break; + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + case DOUBLE: + case DECIMAL: + case FLOAT: + case REAL: + case VARCHAR: + case CHAR: + case BOOLEAN: + if (literal.getValue3() == null) { + return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node(); + } + } + + switch (sqlType) { + case BINARY: ByteString bs = (ByteString) literal.getValue(); - val = bs.byteAt(0); - type = HiveParser.BigintLiteral; + val = bs.toString(); + val = "'" + val + "'"; + type = HiveParser.StringLiteral; break; case TINYINT: if (useTypeQualInLiteral) { @@ -217,7 +245,7 @@ static ASTNode literal(RexLiteral literal, boolean useTypeQualInLiteral) { case TIMESTAMP: { val = literal.getValue(); type = HiveParser.TOK_TIMESTAMPLITERAL; - DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); val = df.format(((Calendar) val).getTime()); val = "'" + val + "'"; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java index d026e58..d5d6aef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java @@ -532,7 +532,7 @@ public ASTNode visitCall(RexCall call) { SqlOperator op = call.getOperator(); List astNodeLst = new LinkedList(); if (op.kind == SqlKind.CAST) { - HiveToken ht = 
TypeConverter.hiveToken(call.getType()); + HiveToken ht = TypeConverter.hiveToken(call); ASTBuilder astBldr = ASTBuilder.construct(ht.type, ht.text); if (ht.args != null) { for (String castArg : ht.args) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java index 631a4ca..536e70e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Map; +import org.apache.calcite.avatica.util.ByteString; import org.apache.calcite.avatica.util.TimeUnit; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.rel.RelNode; @@ -71,6 +72,7 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseNumeric; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFTimestamp; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate; @@ -112,6 +114,10 @@ private InputCtx(RelDataType calciteInpDataType, ImmutableMap h private final ImmutableList inputCtxs; private final boolean flattenExpr; + public RexNodeConverter(RelOptCluster cluster) { + this(cluster, new ArrayList(), false); + } + public RexNodeConverter(RelOptCluster cluster, RelDataType inpDataType, ImmutableMap nameToPosMap, int offset, boolean flattenExpr) { this.cluster = cluster; @@ -259,6 +265,9 @@ private RexNode handleExplicitCast(ExprNodeGenericFuncDesc func, List c GenericUDF udf = func.getGenericUDF(); if ((udf instanceof GenericUDFToChar) || (udf instanceof GenericUDFToVarchar) || (udf instanceof GenericUDFToDecimal) || (udf instanceof GenericUDFToDate) + // Calcite can not specify the scale for timestamp. As a result, all + // the millisecond part will be lost + || (udf instanceof GenericUDFTimestamp) || (udf instanceof GenericUDFToBinary) || castExprUsingUDFBridge(udf)) { castExpr = cluster.getRexBuilder().makeAbstractCast( TypeConverter.convert(func.getTypeInfo(), cluster.getTypeFactory()), @@ -321,6 +330,10 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx coi); RexNode calciteLiteral = null; + if (value == null) { + return cluster.getRexBuilder().makeLiteral(null, + cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true); + } // TODO: Verify if we need to use ConstantObjectInspector to unwrap data switch (hiveTypeCategory) { case BOOLEAN: @@ -378,6 +391,10 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), calciteDataType); break; case DOUBLE: + // TODO: The best solution is to support NaN in expression reduction. 
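The guard added below exists because Calcite approximate literals are backed by java.math.BigDecimal, which has no representation for NaN or infinities. A one-line JDK demonstration of the failure being avoided:

```java
public class NanLiteralDemo {
  public static void main(String[] args) {
    // Throws NumberFormatException("Infinite or NaN") -- the same message
    // surfaced by the CalciteSemanticException in the guard below.
    new java.math.BigDecimal(Double.NaN);
  }
}
```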
+ if (Double.isInfinite((Double) value) || Double.isNaN((Double) value)) { + throw new CalciteSemanticException("Infinite or NaN", UnsupportedFeature.Invalid_decimal); + } calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), calciteDataType); break; case CHAR: @@ -430,6 +447,8 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true); break; case BINARY: + calciteLiteral = cluster.getRexBuilder().makeBinaryLiteral(new ByteString((byte[]) value)); + break; case UNKNOWN: default: throw new RuntimeException("UnSupported Literal"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java index 2825f77..eeeedf8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java @@ -28,6 +28,7 @@ import org.apache.calcite.rel.type.RelDataTypeFactory; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.sql.SqlIntervalQualifier; import org.apache.calcite.sql.type.SqlTypeName; @@ -321,9 +322,9 @@ public static TypeInfo convertPrimitiveType(RelDataType rType) { } /*********************** Convert Calcite Types To Hive Types ***********************/ - public static HiveToken hiveToken(RelDataType calciteType) { + public static HiveToken hiveToken(RexCall call) { HiveToken ht = null; - + RelDataType calciteType = call.getType(); switch (calciteType.getSqlTypeName()) { case CHAR: { ht = new HiveToken(HiveParser.TOK_CHAR, "TOK_CHAR", String.valueOf(calciteType.getPrecision())); @@ -343,6 +344,14 @@ public static HiveToken hiveToken(RelDataType calciteType) { .getPrecision()), String.valueOf(calciteType.getScale())); } break; + case BINARY: { + if (call.getOperands().get(0).getType().getSqlTypeName().getName().equals("BINARY")) { + ht = new HiveToken(HiveParser.Identifier, "unhex"); + } else { + ht = new HiveToken(HiveParser.Identifier, "binary"); + } + } + break; default: ht = calciteToHiveTypeNameMap.get(calciteType.getSqlTypeName().getName()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index c005b1a..b23745c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -39,6 +39,7 @@ import org.antlr.runtime.tree.TreeVisitorAction; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelOptPlanner.Executor; import org.apache.calcite.plan.RelOptQuery; import org.apache.calcite.plan.RelOptRule; import org.apache.calcite.plan.RelOptSchema; @@ -68,7 +69,6 @@ import org.apache.calcite.rel.rules.LoptOptimizeJoinRule; import org.apache.calcite.rel.rules.ProjectMergeRule; import org.apache.calcite.rel.rules.ProjectRemoveRule; -import org.apache.calcite.rel.rules.ReduceExpressionsRule; import org.apache.calcite.rel.rules.SemiJoinFilterTransposeRule; import org.apache.calcite.rel.rules.SemiJoinJoinTransposeRule; import org.apache.calcite.rel.rules.SemiJoinProjectTransposeRule; @@ -118,6 +118,7 @@ import 
org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveDefaultRelMetadataProvider; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveHepPlannerContext; +import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveVolcanoPlannerContext; import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; @@ -150,6 +151,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePreFilteringRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectMergeRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectSortTransposeRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRelFieldTrimmer; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortJoinReduceRule; @@ -866,9 +868,12 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu // Create MD provider HiveDefaultRelMetadataProvider mdProvider = new HiveDefaultRelMetadataProvider(conf); + // Create executor + Executor executorProvider = new HiveRexExecutorImpl(cluster); + // 2. Apply pre-join order optimizations calcitePreCboPlan = applyPreJoinOrderingTransforms(calciteGenPlan, - mdProvider.getMetadataProvider()); + mdProvider.getMetadataProvider(), executorProvider); // 3. Apply join order optimizations: reordering MST algorithm // If join optimizations failed because of missing stats, we continue with @@ -918,11 +923,9 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu // 4. Run other optimizations that do not need stats calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), - HepMatchOrder.BOTTOM_UP, ReduceExpressionsRule.JOIN_INSTANCE, - ReduceExpressionsRule.FILTER_INSTANCE, ReduceExpressionsRule.PROJECT_INSTANCE, - ProjectRemoveRule.INSTANCE, UnionMergeRule.INSTANCE, - new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY), - HiveAggregateProjectMergeRule.INSTANCE); + HepMatchOrder.BOTTOM_UP, ProjectRemoveRule.INSTANCE, UnionMergeRule.INSTANCE, + new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY), + HiveAggregateProjectMergeRule.INSTANCE); // 5. Run aggregate-join transpose (cost based) // If it failed because of missing stats, we continue with @@ -1011,9 +1014,12 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu * original plan * @param mdProvider * meta data provider + * @param executorProvider + * executor * @return */ - private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProvider mdProvider) { + private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, + RelMetadataProvider mdProvider, Executor executorProvider) { // TODO: Decorelation of subquery should be done before attempting // Partition Pruning; otherwise Expression evaluation may try to execute @@ -1053,11 +1059,20 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv basePlan = hepPlan(basePlan, true, mdProvider, HiveJoinAddNotNullRule.INSTANCE); } - // 4. Constant propagation, common filter extraction, and PPD + // 4. Projection Pruning + // Note that we need to run Projection Pruning before ReduceExpressionsRule. 
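For orientation, a minimal sketch of how the pieces added by this patch fit together, assuming an existing RelOptCluster `cluster` and plan root `root`; this condenses what hepPlan and applyPreJoinOrderingTransforms do rather than reproducing them.

```java
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsRule;

public class ConstantFoldingSketch {
  static RelNode foldConstants(RelOptCluster cluster, RelNode root) {
    // The executor is what lets reduceExpressions() actually evaluate
    // constant subtrees (see HiveRexExecutorImpl above).
    cluster.getPlanner().setExecutor(new HiveRexExecutorImpl(cluster));
    HepProgramBuilder programBuilder = new HepProgramBuilder();
    programBuilder.addRuleInstance(HiveReduceExpressionsRule.PROJECT_INSTANCE);
    programBuilder.addRuleInstance(HiveReduceExpressionsRule.FILTER_INSTANCE);
    programBuilder.addRuleInstance(HiveReduceExpressionsRule.JOIN_INSTANCE);
    HepPlanner planner = new HepPlanner(programBuilder.build());
    planner.setRoot(root);
    return planner.findBestExp();
  }
}
```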
+ HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, + cluster, HiveProject.DEFAULT_PROJECT_FACTORY, + HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY, + HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY, + HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY); + basePlan = fieldTrimmer.trim(basePlan); + + // 5. Constant propagation, common filter extraction, and PPD basePlan = hepPlan(basePlan, true, mdProvider, - ReduceExpressionsRule.PROJECT_INSTANCE, - ReduceExpressionsRule.FILTER_INSTANCE, - ReduceExpressionsRule.JOIN_INSTANCE, + HiveReduceExpressionsRule.PROJECT_INSTANCE, + HiveReduceExpressionsRule.FILTER_INSTANCE, + HiveReduceExpressionsRule.JOIN_INSTANCE, HivePreFilteringRule.INSTANCE, new HiveFilterProjectTransposeRule(Filter.class, HiveFilter.DEFAULT_FILTER_FACTORY, HiveProject.class, HiveProject.DEFAULT_PROJECT_FACTORY), @@ -1067,19 +1082,11 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv new FilterAggregateTransposeRule(Filter.class, HiveFilter.DEFAULT_FILTER_FACTORY, Aggregate.class)); - // 5. Transitive inference & Partition Pruning + // 6. Transitive inference & Partition Pruning basePlan = hepPlan(basePlan, false, mdProvider, new HiveJoinPushTransitivePredicatesRule( Join.class, HiveFilter.DEFAULT_FILTER_FACTORY), new HivePartitionPruneRule(conf)); - // 6. Projection Pruning - HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, - cluster, HiveProject.DEFAULT_PROJECT_FACTORY, - HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY, - HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSortLimit.HIVE_SORT_REL_FACTORY, - HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY); - basePlan = fieldTrimmer.trim(basePlan); - // 7. Rerun PPD through Project as column pruning would have introduced DT // above scans basePlan = hepPlan(basePlan, true, mdProvider, @@ -1100,7 +1107,23 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv */ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadataProvider mdProvider, RelOptRule... rules) { - return hepPlan(basePlan, followPlanChanges, mdProvider, + return hepPlan(basePlan, followPlanChanges, mdProvider, null, + HepMatchOrder.TOP_DOWN, rules); + } + + /** + * Run the HEP Planner with the given rule set. + * + * @param basePlan + * @param followPlanChanges + * @param mdProvider + * @param executorProvider + * @param rules + * @return optimized RelNode + */ + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, + RelMetadataProvider mdProvider, Executor executorProvider, RelOptRule... rules) { + return hepPlan(basePlan, followPlanChanges, mdProvider, executorProvider, HepMatchOrder.TOP_DOWN, rules); } @@ -1114,8 +1137,50 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, * @param rules * @return optimized RelNode */ - private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadataProvider mdProvider, - HepMatchOrder order, RelOptRule... rules) { + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, + RelMetadataProvider mdProvider, HepMatchOrder order, RelOptRule... rules) { + + RelNode optimizedRelNode = basePlan; + HepProgramBuilder programBuilder = new HepProgramBuilder(); + if (followPlanChanges) { + programBuilder.addMatchOrder(order); + programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules)); + } else { + // TODO: Should this be also TOP_DOWN? 
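The two hepPlan branches differ in HEP program shape: addRuleCollection registers the rules as one group step that is retried until none of them fires, while addRuleInstance makes each rule its own sequential step. A sketch of the two shapes, assuming Calcite's HepProgramBuilder behavior:

```java
import com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;

public class HepProgramShapes {
  // Group step: the collection is retried as a whole until no rule fires
  // (the followPlanChanges branch).
  static HepProgram asCollection(ImmutableList<RelOptRule> rules) {
    return new HepProgramBuilder().addRuleCollection(rules).build();
  }

  // Sequential steps: each rule is its own program step, run in order
  // (the branch below).
  static HepProgram asSequence(ImmutableList<RelOptRule> rules) {
    HepProgramBuilder builder = new HepProgramBuilder();
    for (RelOptRule rule : rules) {
      builder.addRuleInstance(rule);
    }
    return builder.build();
  }
}
```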
+ for (RelOptRule r : rules) + programBuilder.addRuleInstance(r); + } + + HiveRulesRegistry registry = new HiveRulesRegistry(); + HiveHepPlannerContext context = new HiveHepPlannerContext(registry); + HepPlanner planner = new HepPlanner(programBuilder.build(), context); + List list = Lists.newArrayList(); + list.add(mdProvider); + planner.registerMetadataProviders(list); + RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); + basePlan.getCluster().setMetadataProvider( + new CachingRelMetadataProvider(chainedProvider, planner)); + + planner.setRoot(basePlan); + optimizedRelNode = planner.findBestExp(); + + return optimizedRelNode; + } + + /** + * Run the HEP Planner with the given rule set. + * + * @param basePlan + * @param followPlanChanges + * @param mdProvider + * @param executorProvider + * @param order + * @param rules + * @return optimized RelNode + */ + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, + RelMetadataProvider mdProvider, Executor executorProvider, HepMatchOrder order, + RelOptRule... rules) { RelNode optimizedRelNode = basePlan; HepProgramBuilder programBuilder = new HepProgramBuilder(); @@ -1131,6 +1196,7 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadata HiveRulesRegistry registry = new HiveRulesRegistry(); HiveHepPlannerContext context = new HiveHepPlannerContext(registry); HepPlanner planner = new HepPlanner(programBuilder.build(), context); + List list = Lists.newArrayList(); list.add(mdProvider); planner.registerMetadataProviders(list); @@ -1138,10 +1204,9 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadata basePlan.getCluster().setMetadataProvider( new CachingRelMetadataProvider(chainedProvider, planner)); - // Executor is required for constant-reduction rules; see [CALCITE-566] - final RexExecutorImpl executor = - new RexExecutorImpl(Schemas.createDataContext(null)); - basePlan.getCluster().getPlanner().setExecutor(executor); + if (executorProvider != null) { + basePlan.getCluster().getPlanner().setExecutor(executorProvider); + } planner.setRoot(basePlan); optimizedRelNode = planner.findBestExp(); diff --git a/ql/src/test/queries/clientpositive/cbo_const.q b/ql/src/test/queries/clientpositive/cbo_const.q new file mode 100644 index 0000000..6409795 --- /dev/null +++ b/ql/src/test/queries/clientpositive/cbo_const.q @@ -0,0 +1,38 @@ +set hive.cbo.enable=false; + +EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3); + + +EXPLAIN +select key from (SELECT key from src where key = 1+3)s; + +select * from (select key from src where key = '1')subq; + +select '1'; + +select * from (select '1')subq; + +select * from (select key from src where false)subq; + +set hive.cbo.enable=true; + +EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3); + + +EXPLAIN +select key from (SELECT key from src where key = 1+3)s; + +select * from (select key from src where key = '1')subq; + +select '1'; + +select * from (select '1')subq; + +select * from (select key from src where false)subq; + diff --git a/ql/src/test/queries/clientpositive/constantfolding.q b/ql/src/test/queries/clientpositive/constantfolding.q new file mode 100644 index 0000000..486bfaa --- /dev/null +++ b/ql/src/test/queries/clientpositive/constantfolding.q @@ -0,0 +1,80 @@ +set 
hive.optimize.ppd=true; + +-- SORT_QUERY_RESULTS + +drop table if exists union_all_bug_test_1; +drop table if exists union_all_bug_test_2; +create table if not exists union_all_bug_test_1 +( +f1 int, +f2 int +); + +create table if not exists union_all_bug_test_2 +( +f1 int +); + +insert into table union_all_bug_test_1 values (1,1); +insert into table union_all_bug_test_2 values (1); +insert into table union_all_bug_test_1 values (0,0); +insert into table union_all_bug_test_2 values (0); + + + +SELECT f1 +FROM ( + +SELECT +f1 +, if('helloworld' like '%hello%' ,f1,f2) as filter +FROM union_all_bug_test_1 + +union all + +select +f1 +, 0 as filter +from union_all_bug_test_2 +) A +WHERE (filter = 1 and f1 = 1); + + +select percentile(cast(key as bigint), array()) from src where false; + +select unbase64("0xe23") from src; + +SELECT key,randum123, h4 +FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a +WHERE a.h4 <= 3; + +select null from src; + +-- numRows: 2 rawDataSize: 80 +explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src; + +-- numRows: 2 rawDataSize: 112 +explain select cast("1970-12-31 15:59:58.174" as DATE) from src; + +CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE; + +FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86; + +EXPLAIN +SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1; + +SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1; diff --git a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out index 20ccda5..50d5d8f 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out @@ -120,7 +120,7 @@ STAGE PLANS: alias: over1k Statistics: Num rows: 2098 Data size: 16736 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53))) (type: boolean) + predicate: ((((si = 2) or (si = 3) or (si = 4) or (si = 5) or (si = 6) or (si = 7) or (si = 8) or (si = 10) or (si = 
11) or (si = 12) or (si = 13) or (si = 14) or (si = 15) or (si = 16) or (si = 17) or (si = 18) or (si = 28) or (si = 38) or (si = 48) or (si = 53)) and ((t = 1) or (t = 2) or (t = 3) or (t = 4) or (t = 5) or (t = 6) or (t = 7) or (t = 9) or (t = 10) or (t = 11) or (t = 12) or (t = 13) or (t = 14) or (t = 15) or (t = 16) or (t = 17) or (t = 27) or (t = 37) or (t = 47) or (t = 52))) and (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53)))) (type: boolean) Statistics: Num rows: 280 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 280 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE @@ -209,7 +209,7 @@ STAGE PLANS: alias: over1k Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53))) (type: boolean) + predicate: ((((si = 2) or (si = 3) or (si = 4) or (si = 5) or (si = 6) or (si = 7) or (si = 8) or (si = 10) or (si = 11) or (si = 12) or (si = 13) or (si = 14) or (si = 15) or (si = 16) or (si = 17) or (si = 18) or (si = 28) or (si = 38) or (si = 48) or (si = 53)) and ((t = 1) or (t = 2) or (t = 3) or (t = 4) or (t = 5) or (t = 6) or (t = 7) or (t = 9) or (t = 10) or (t = 11) or (t = 12) or (t = 13) or (t = 14) or (t = 15) or (t = 16) or (t = 17) or (t = 27) or (t = 37) or (t = 47) or (t = 52))) and (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53)))) (type: boolean) Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out index 14e7e60..23b991c 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out @@ -864,12 +864,12 @@ STAGE PLANS: Select Operator expressions: ss_store_sk (type: int) outputColumnNames: _col0 - 
Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 321 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 321 Data size: 1236 Basic stats: COMPLETE Column stats: COMPLETE TableScan alias: s Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE @@ -909,10 +909,10 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) 2 _col0 (type: int) - outputColumnNames: _col2 + outputColumnNames: _col1 Statistics: Num rows: 273 Data size: 1092 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col2 (type: int) + expressions: _col1 (type: int) outputColumnNames: _col0 Statistics: Num rows: 273 Data size: 1092 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join19.q.out b/ql/src/test/results/clientpositive/auto_join19.q.out index 8a57cb0..616aa77 100644 --- a/ql/src/test/results/clientpositive/auto_join19.q.out +++ b/ql/src/test/results/clientpositive/auto_join19.q.out @@ -65,10 +65,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col2 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col2 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join9.q.out b/ql/src/test/results/clientpositive/auto_join9.q.out index 13dd5de..5bc63ec 100644 --- a/ql/src/test/results/clientpositive/auto_join9.q.out +++ b/ql/src/test/results/clientpositive/auto_join9.q.out @@ -24,20 +24,20 @@ STAGE PLANS: Stage: Stage-5 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:src1 + $hdt$_0:src2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:src1 + $hdt$_0:src2 TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: @@ -48,14 +48,14 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -63,10 +63,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - 
outputColumnNames: _col0, _col4
+                  outputColumnNames: _col1, _col2
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: UDFToInteger(_col0) (type: int), _col4 (type: string)
+                    expressions: UDFToInteger(_col2) (type: int), _col1 (type: string)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
diff --git a/ql/src/test/results/clientpositive/bucketpruning1.q.out b/ql/src/test/results/clientpositive/bucketpruning1.q.out
index d11239f..f05c42a 100644
--- a/ql/src/test/results/clientpositive/bucketpruning1.q.out
+++ b/ql/src/test/results/clientpositive/bucketpruning1.q.out
@@ -1651,12 +1651,12 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: srcbucket_pruned
-          filterExpr: (((key = 1) and (ds = '2008-04-08')) or (key = 2)) (type: boolean)
+          filterExpr: (((key = 1) or (key = 2)) and (((key = 1) and (ds = '2008-04-08')) or (key = 2))) (type: boolean)
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Filter Operator
             isSamplingPred: false
-            predicate: (((key = 1) and (ds = '2008-04-08')) or (key = 2)) (type: boolean)
+            predicate: (((key = 1) or (key = 2)) and (((key = 1) and (ds = '2008-04-08')) or (key = 2))) (type: boolean)
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Select Operator
              expressions: key (type: int), value (type: string), ds (type: string)
@@ -1743,12 +1743,12 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: srcbucket_pruned
-          filterExpr: (((key = 1) and (ds = '2008-04-08')) and ((value = 'One') or (value = 'Two'))) (type: boolean)
+          filterExpr: ((((value = 'One') or (value = 'Two')) and (key = 1)) and (ds = '2008-04-08')) (type: boolean)
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Filter Operator
             isSamplingPred: false
-            predicate: (((key = 1) and (ds = '2008-04-08')) and ((value = 'One') or (value = 'Two'))) (type: boolean)
+            predicate: ((((value = 'One') or (value = 'Two')) and (key = 1)) and (ds = '2008-04-08')) (type: boolean)
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Select Operator
              expressions: 1 (type: int), value (type: string), '2008-04-08' (type: string)
diff --git a/ql/src/test/results/clientpositive/cast1.q.out b/ql/src/test/results/clientpositive/cast1.q.out
index 0bdecba..48a0c14 100644
--- a/ql/src/test/results/clientpositive/cast1.q.out
+++ b/ql/src/test/results/clientpositive/cast1.q.out
@@ -105,11 +105,11 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION []
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 EXPRESSION []
-POSTHOOK: Lineage: dest1.c5 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Lineage: dest1.c2 SIMPLE []
+POSTHOOK: Lineage: dest1.c3 SIMPLE []
+POSTHOOK: Lineage: dest1.c4 SIMPLE []
+POSTHOOK: Lineage: dest1.c5 SIMPLE []
 POSTHOOK: Lineage: dest1.c6 EXPRESSION []
 POSTHOOK: Lineage: dest1.c7 EXPRESSION []
 PREHOOK: query: select dest1.* FROM dest1
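The cast1.q.out lineage entries above flip from EXPRESSION to SIMPLE because constant arithmetic such as 3 + 2 now reaches the lineage hook already folded to a literal, so the target column is fed by a constant rather than a computed expression over source columns (the same effect shows up later in this patch, where cbo_rp_lineage2.q.out records the literal 15 in place of the expression (3 * 5)). A minimal sketch of the folding primitive involved — illustrative only; the wrapper class and the literal operands are invented for this example:

    import java.util.Arrays;

    import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
    import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPlus;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    public class FoldExprSketch {
      public static void main(String[] args) {
        // "3 + 2" the way Hive's optimizer sees it: a generic-UDF call node
        // (GenericUDFOPPlus) over two constant children.
        ExprNodeGenericFuncDesc plus = new ExprNodeGenericFuncDesc(
            TypeInfoFactory.intTypeInfo,
            new GenericUDFOPPlus(),
            Arrays.<ExprNodeDesc>asList(
                new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 3),
                new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 2)));

        // foldExpr evaluates the call at planning time; it returns an
        // ExprNodeConstantDesc (here, the constant 5), or null when the
        // expression cannot be folded.
        ExprNodeDesc folded = ConstantPropagateProcFactory.foldExpr(plus);
        System.out.println(folded);
      }
    }

foldExpr is the same entry point the new HiveRexExecutorImpl goes through, which is why the literals EXPLAIN prints and the SIMPLE lineage edges come out consistently: one folding code path feeds both.

diff --git a/ql/src/test/results/clientpositive/cbo_const.q.out b/ql/src/test/results/clientpositive/cbo_const.q.out
new file mode 100644
index 0000000..79a6783
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_const.q.out
@@ -0,0 +1,370 @@
+PREHOOK: query: 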
EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: x + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key = 3) and value is not null) (type: boolean) + Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '3' (type: string) + sort order: + + Map-reduce partition columns: '3' (type: string) + Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + TableScan + alias: y + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 3) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '3' (type: string) + sort order: + + Map-reduce partition columns: '3' (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col1, _col6 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + value expressions: _col6 (type: string) + TableScan + alias: z + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (((ds = '2008-04-08') and (hr = 14)) and value is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col1 (type: string) + 1 value (type: string) + outputColumnNames: _col6, _col11 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '3' (type: string), _col11 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE 
Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN +select key from (SELECT key from src where key = 1+3)s +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +select key from (SELECT key from src where key = 1+3)s +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 4) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '4' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select key from src where key = '1')subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key from src where key = '1')subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +PREHOOK: query: select '1' +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: select * from (select '1')subq +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select * from (select '1')subq +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: select * from (select key from src where false)subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key from src where false)subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +PREHOOK: query: EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: y + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(key) = 3.0) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 
Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: string) + outputColumnNames: _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '3' (type: string) + sort order: + + Map-reduce partition columns: '3' (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + TableScan + alias: x + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((UDFToDouble(key) = 3.0) and value is not null) (type: boolean) + Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: string) + outputColumnNames: _col1 + Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '3' (type: string) + sort order: + + Map-reduce partition columns: '3' (type: string) + Statistics: Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col1, _col3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + TableScan + alias: z + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (((ds = '2008-04-08') and (UDFToDouble(hr) = 14.0)) and value is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: value (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col3 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col1, _col4 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '3' (type: string), _col4 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + 
ListSink + +PREHOOK: query: EXPLAIN +select key from (SELECT key from src where key = 1+3)s +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +select key from (SELECT key from src where key = 1+3)s +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(key) = 4.0) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '4' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select key from src where key = '1')subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key from src where key = '1')subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +PREHOOK: query: select '1' +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: select * from (select '1')subq +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select * from (select '1')subq +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: select * from (select key from src where false)subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key from src where false)subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out index 09b981b..e938612 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out @@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"15","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]} 15 15 15 diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out 
b/ql/src/test/results/clientpositive/constantfolding.q.out new file mode 100644 index 0000000..6bbc29e --- /dev/null +++ b/ql/src/test/results/clientpositive/constantfolding.q.out @@ -0,0 +1,1281 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +drop table if exists union_all_bug_test_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- SORT_QUERY_RESULTS + +drop table if exists union_all_bug_test_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists union_all_bug_test_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists union_all_bug_test_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists union_all_bug_test_1 +( +f1 int, +f2 int +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: query: create table if not exists union_all_bug_test_1 +( +f1 int, +f2 int +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@union_all_bug_test_1 +PREHOOK: query: create table if not exists union_all_bug_test_2 +( +f1 int +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: query: create table if not exists union_all_bug_test_2 +( +f1 int +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@union_all_bug_test_2 +PREHOOK: query: insert into table union_all_bug_test_1 values (1,1) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: query: insert into table union_all_bug_test_1 values (1,1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: Lineage: union_all_bug_test_1.f1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: union_all_bug_test_1.f2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into table union_all_bug_test_2 values (1) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: query: insert into table union_all_bug_test_2 values (1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: Lineage: union_all_bug_test_2.f1 EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table union_all_bug_test_1 values (0,0) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: query: insert into table union_all_bug_test_1 values (0,0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: Lineage: union_all_bug_test_1.f1 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: union_all_bug_test_1.f2 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into table union_all_bug_test_2 values (0) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: query: insert into table union_all_bug_test_2 values (0) 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: Lineage: union_all_bug_test_2.f1 EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: SELECT f1 +FROM ( + +SELECT +f1 +, if('helloworld' like '%hello%' ,f1,f2) as filter +FROM union_all_bug_test_1 + +union all + +select +f1 +, 0 as filter +from union_all_bug_test_2 +) A +WHERE (filter = 1 and f1 = 1) +PREHOOK: type: QUERY +PREHOOK: Input: default@union_all_bug_test_1 +PREHOOK: Input: default@union_all_bug_test_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT f1 +FROM ( + +SELECT +f1 +, if('helloworld' like '%hello%' ,f1,f2) as filter +FROM union_all_bug_test_1 + +union all + +select +f1 +, 0 as filter +from union_all_bug_test_2 +) A +WHERE (filter = 1 and f1 = 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_all_bug_test_1 +POSTHOOK: Input: default@union_all_bug_test_2 +#### A masked pattern was here #### +1 +PREHOOK: query: select percentile(cast(key as bigint), array()) from src where false +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select percentile(cast(key as bigint), array()) from src where false +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +NULL +PREHOOK: query: select unbase64("0xe23") from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select unbase64("0xe23") from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� 
+�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +PREHOOK: query: SELECT key,randum123, h4 +FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a +WHERE a.h4 <= 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key,randum123, h4 +FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a +WHERE a.h4 <= 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +PREHOOK: query: select null from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select null from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL 
+NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: -- numRows: 2 rawDataSize: 80 +explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src +PREHOOK: type: QUERY +POSTHOOK: query: -- numRows: 2 rawDataSize: 80 +explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 1970-12-31 15:59:58.174 (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: -- numRows: 2 rawDataSize: 112 +explain select cast("1970-12-31 15:59:58.174" as DATE) from src +PREHOOK: type: QUERY +POSTHOOK: query: -- numRows: 2 rawDataSize: 112 +explain select cast("1970-12-31 15:59:58.174" as DATE) from src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: null (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + ListSink + +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dest1 +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.c1 SIMPLE [] +PREHOOK: query: EXPLAIN +SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) 
FROM dest1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: dest1 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 1.098612288668 (type: double), null (type: double), null (type: double), 1.098612288668 (type: double), null (type: double), null (type: double), 1.584962500721 (type: double), null (type: double), null (type: double), 0.47712125472 (type: double), null (type: double), null (type: double), 1.584962500721 (type: double), null (type: double), null (type: double), null (type: double), -1.0 (type: double), 7.389056098931 (type: double), 8.0 (type: double), 8.0 (type: double), 0.125 (type: double), 8.0 (type: double), 2.0 (type: double), NaN (type: double), 1.0 (type: double), 1.0 (type: double), 8.0 (type: double), 8.0 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27 + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +1.098612288668 NULL NULL 1.098612288668 NULL NULL 1.584962500721 NULL NULL 0.47712125472 NULL NULL 1.584962500721 NULL NULL NULL -1.0 7.389056098931 8.0 8.0 0.125 8.0 2.0 NaN 1.0 1.0 8.0 8.0 diff --git a/ql/src/test/results/clientpositive/constprog_partitioner.q.out b/ql/src/test/results/clientpositive/constprog_partitioner.q.out index 9bf47c2..d36fccc 100644 --- a/ql/src/test/results/clientpositive/constprog_partitioner.q.out +++ b/ql/src/test/results/clientpositive/constprog_partitioner.q.out @@ -111,13 +111,13 @@ STAGE PLANS: predicate: ((l_linenumber = 1) and l_orderkey is not null) (type: boolean) Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int) - outputColumnNames: _col0, _col1, _col2 + expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int), 1 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 25 Data size: 2999 Basic 
stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) + key expressions: _col0 (type: int), _col3 (type: int) + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col3 (type: int) Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int) TableScan @@ -127,26 +127,26 @@ STAGE PLANS: predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean) Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: l_orderkey (type: int) - outputColumnNames: _col0 + expressions: l_orderkey (type: int), 1 (type: int) + outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: int) + keys: _col0 (type: int), _col1 (type: int) mode: hash - outputColumnNames: _col0 + outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) + key expressions: _col0 (type: int), _col1 (type: int) + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int) Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: Left Semi Join 0 to 1 keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), _col3 (type: int) + 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col1, _col2 Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE Select Operator diff --git a/ql/src/test/results/clientpositive/create_genericudf.q.out b/ql/src/test/results/clientpositive/create_genericudf.q.out index 586f0ba..db3a9b5 100644 --- a/ql/src/test/results/clientpositive/create_genericudf.q.out +++ b/ql/src/test/results/clientpositive/create_genericudf.q.out @@ -50,13 +50,13 @@ SELECT POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [] +POSTHOOK: Lineage: dest1.c1 SIMPLE [] +POSTHOOK: Lineage: dest1.c2 SIMPLE [] POSTHOOK: Lineage: dest1.c3 EXPRESSION [] POSTHOOK: Lineage: dest1.c4 EXPRESSION [] POSTHOOK: Lineage: dest1.c5 EXPRESSION [] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [] +POSTHOOK: Lineage: dest1.c6 SIMPLE [] +POSTHOOK: Lineage: dest1.c7 SIMPLE [] PREHOOK: query: SELECT dest1.* FROM dest1 LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 diff --git a/ql/src/test/results/clientpositive/cte_2.q.out b/ql/src/test/results/clientpositive/cte_2.q.out index a8bc760..d6923ba 100644 --- a/ql/src/test/results/clientpositive/cte_2.q.out +++ b/ql/src/test/results/clientpositive/cte_2.q.out @@ -40,7 +40,7 @@ select * POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@s1 -POSTHOOK: Lineage: s1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: s1.key SIMPLE [] POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select * from s1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out 
b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out index 99eb3f7..3a3e3fa 100644 --- a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out +++ b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out @@ -283,32 +283,32 @@ STAGE PLANS: alias: t2 Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 1.0) (type: boolean) - Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE + predicate: (UDFToDouble(key) is not null and key is not null) (type: boolean) + Statistics: Num rows: 5 Data size: 65 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: c_int (type: int), c_float (type: float) - outputColumnNames: _col1, _col2 - Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE + expressions: key (type: string), c_int (type: int), c_float (type: float) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 65 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: '1.0' (type: string) - sort order: + - Map-reduce partition columns: '1.0' (type: string) - Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE + key expressions: UDFToDouble(_col0) (type: double), _col0 (type: string) + sort order: ++ + Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: string) + Statistics: Num rows: 5 Data size: 65 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: float) TableScan alias: t3 Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((c_int = 1) and (UDFToDouble(key) = 1.0)) (type: boolean) + predicate: ((c_int = 1) and key is not null) (type: boolean) Statistics: Num rows: 5 Data size: 65 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: c_float (type: float) - outputColumnNames: _col2 + expressions: key (type: string), c_float (type: float) + outputColumnNames: _col0, _col2 Statistics: Num rows: 5 Data size: 65 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: '1.0' (type: string) - sort order: + - Map-reduce partition columns: '1.0' (type: string) + key expressions: 1.0 (type: double), _col0 (type: string) + sort order: ++ + Map-reduce partition columns: 1.0 (type: double), _col0 (type: string) Statistics: Num rows: 5 Data size: 65 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: float) Reduce Operator Tree: @@ -316,15 +316,17 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col1, _col2, _col5 - Statistics: Num rows: 11 Data size: 144 Basic stats: COMPLETE Column stats: NONE + 0 UDFToDouble(_col0) (type: double), _col0 (type: string) + 1 UDFToDouble(_col1) (type: double), _col0 (type: string) + outputColumnNames: _col0, _col1, _col2, _col5 + Statistics: Num rows: 5 Data size: 71 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((_col2 + _col5) > 2.0) or ((_col1 + 1) > 2)) (type: boolean) - Statistics: Num rows: 6 Data size: 78 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 6 Data size: 78 Basic stats: COMPLETE Column stats: NONE + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 28 Basic 
stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -339,21 +341,23 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 1.0) (type: boolean) + predicate: key is not null (type: boolean) Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE Select Operator + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: '1.0' (type: string) + key expressions: _col0 (type: string) sort order: + - Map-reduce partition columns: '1.0' (type: string) + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator - key expressions: '1.0' (type: string) + key expressions: _col0 (type: string) sort order: + - Map-reduce partition columns: '1.0' (type: string) - Statistics: Num rows: 6 Data size: 78 Basic stats: COMPLETE Column stats: NONE + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: @@ -361,18 +365,15 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) + outputColumnNames: _col0 Statistics: Num rows: 11 Data size: 144 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '1.0' (type: string) - outputColumnNames: _col0 + File Output Operator + compressed: false Statistics: Num rows: 11 Data size: 144 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 11 Data size: 144 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out index 176a837..9bb677c 100644 --- a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out +++ b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out @@ -231,7 +231,7 @@ STAGE PLANS: name: default.filter_join_breaktask name: default.filter_join_breaktask Truncated Path -> Alias: - /filter_join_breaktask/ds=2008-04-08 [$hdt$_0:$hdt$_0:f, $hdt$_1:$hdt$_1:f] + /filter_join_breaktask/ds=2008-04-08 [$hdt$_0:$hdt$_0:f, $hdt$_1:$hdt$_1:$hdt$_1:f] Needs Tagging: true Reduce Operator Tree: Join Operator diff --git a/ql/src/test/results/clientpositive/flatten_and_or.q.out b/ql/src/test/results/clientpositive/flatten_and_or.q.out index 9c51ff3..c1b4302 100644 --- a/ql/src/test/results/clientpositive/flatten_and_or.q.out +++ b/ql/src/test/results/clientpositive/flatten_and_or.q.out @@ -44,7 +44,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') 
and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3'))) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3')))) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out index 56c5176..19f574e 100644 --- a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out @@ -1619,38 +1619,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col3) > 15.0) and (UDFToDouble(_col3) < 25.0)) (type: boolean) + predicate: ((UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out index 6164a26..e3e4a50 100644 --- a/ql/src/test/results/clientpositive/groupby_ppd.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out @@ -28,16 +28,16 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: foo (type: int) - outputColumnNames: _col1 + outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Union Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 1 (type: int), _col1 (type: int) - outputColumnNames: _col0, _col1 + expressions: _col0 (type: int) + outputColumnNames: _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int) + keys: 1 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -54,16 +54,16 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: foo (type: int) - outputColumnNames: _col1 + outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Union Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 1 (type: int), _col1 (type: int) - outputColumnNames: _col0, _col1 + expressions: _col0 (type: int) + outputColumnNames: _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int) + keys: 1 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -79,7 +79,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col1 (type: int), _col0 (type: int) + expressions: _col1 (type: int), 1 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/input42.q.out b/ql/src/test/results/clientpositive/input42.q.out index 2974159..7658dc7 100644 --- a/ql/src/test/results/clientpositive/input42.q.out +++ b/ql/src/test/results/clientpositive/input42.q.out @@ -1724,14 +1724,53 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-0 is a root stage + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Partition Description: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: (rand(100) < 0.1) (type: boolean) + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE + File 
Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### Partition + base file name: hr=11 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -1775,7 +1814,9 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.srcpart name: default.srcpart +#### A masked pattern was here #### Partition + base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -1819,20 +1860,15 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.srcpart name: default.srcpart + Truncated Path -> Alias: + /srcpart/ds=2008-04-08/hr=11 [a] + /srcpart/ds=2008-04-08/hr=12 [a] + + Stage: Stage-0 + Fetch Operator + limit: -1 Processor Tree: - TableScan - alias: a - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: (rand(100) < 0.1) (type: boolean) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE - ListSink + ListSink PREHOOK: query: select * from srcpart a where a.ds='2008-04-08' and rand(100) < 0.1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/input_part6.q.out b/ql/src/test/results/clientpositive/input_part6.q.out index fa51cdf..c01d8af 100644 --- a/ql/src/test/results/clientpositive/input_part6.q.out +++ b/ql/src/test/results/clientpositive/input_part6.q.out @@ -19,7 +19,7 @@ STAGE PLANS: predicate: (UDFToDouble(ds) = 1996.0) (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), '1996' (type: string), hr (type: string) + expressions: key (type: string), value (type: string), '1996.0' (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/insert1.q.out b/ql/src/test/results/clientpositive/insert1.q.out index 49dd2d5..7a2c429 100644 --- a/ql/src/test/results/clientpositive/insert1.q.out +++ b/ql/src/test/results/clientpositive/insert1.q.out @@ -26,7 +26,7 @@ POSTHOOK: query: insert overwrite table insert1 select 
a.key, a.value from inser POSTHOOK: type: QUERY POSTHOOK: Input: default@insert2 POSTHOOK: Output: default@insert1 -POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.key SIMPLE [] POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain insert into table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1) PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/join28.q.out b/ql/src/test/results/clientpositive/join28.q.out index d748495..681d10f 100644 --- a/ql/src/test/results/clientpositive/join28.q.out +++ b/ql/src/test/results/clientpositive/join28.q.out @@ -109,10 +109,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out index afb373d..149a364 100644 --- a/ql/src/test/results/clientpositive/join32.q.out +++ b/ql/src/test/results/clientpositive/join32.q.out @@ -131,7 +131,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 $hdt$_2:x @@ -174,20 +174,20 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 Position of Big Table: 0 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join32_lessSize.q.out b/ql/src/test/results/clientpositive/join32_lessSize.q.out index 8e71710..772eede 100644 --- a/ql/src/test/results/clientpositive/join32_lessSize.q.out +++ b/ql/src/test/results/clientpositive/join32_lessSize.q.out @@ -163,7 +163,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 Position of Big Table: 0 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -175,7 +175,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3 + columns _col0,_col1 columns.types string,string escape.delim \ serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -303,7 +303,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 @@ -316,13 +316,13 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -362,7 +362,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3 + columns _col0,_col1 columns.types string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -371,7 +371,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3 + columns _col0,_col1 columns.types string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1566,11 +1566,11 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 Position of Big Table: 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2556,10 +2556,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2813,10 +2813,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out index afb373d..149a364 100644 --- 
a/ql/src/test/results/clientpositive/join33.q.out +++ b/ql/src/test/results/clientpositive/join33.q.out @@ -131,7 +131,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 $hdt$_2:x @@ -174,20 +174,20 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 Position of Big Table: 0 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join42.q.out b/ql/src/test/results/clientpositive/join42.q.out index dda95d9..943a4d5 100644 --- a/ql/src/test/results/clientpositive/join42.q.out +++ b/ql/src/test/results/clientpositive/join42.q.out @@ -80,8 +80,6 @@ POSTHOOK: Output: default@acct POSTHOOK: Lineage: acct.acc_n EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] POSTHOOK: Lineage: acct.aid EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] POSTHOOK: Lineage: acct.brn EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ] -Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: --[HIVE-10841] (WHERE col is not null) does not work sometimes for queries with many JOIN statements explain select acct.ACC_N, @@ -114,8 +112,7 @@ STAGE DEPENDENCIES: Stage-1 is a root stage Stage-2 depends on stages: Stage-1 Stage-3 depends on stages: Stage-2 - Stage-4 depends on stages: Stage-3 - Stage-0 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 STAGE PLANS: Stage: Stage-1 @@ -130,66 +127,50 @@ STAGE PLANS: Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 4436 (type: int) + sort order: + + Map-reduce partition columns: 4436 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE TableScan alias: la Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((4436 = loan_id) and aid is not null) and pi_id is not null) (type: boolean) + predicate: (((loan_id = 4436) and aid is not null) and pi_id is not null) (type: boolean) Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: aid (type: int), pi_id (type: int) outputColumnNames: _col1, _col2 Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator 
- sort order: + key expressions: 4436 (type: int) + sort order: + + Map-reduce partition columns: 4436 (type: int) Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 - 1 - outputColumnNames: _col2, _col3 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE - value expressions: _col2 (type: int), _col3 (type: int) TableScan alias: fr Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (4436 = loan_id) (type: boolean) + predicate: (loan_id = 4436) (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 4436 (type: int) + sort order: + + Map-reduce partition columns: 4436 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 + Inner Join 0 to 2 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) + 2 _col0 (type: int) outputColumnNames: _col2, _col3 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -197,7 +178,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-3 + Stage: Stage-2 Map Reduce Map Operator Tree: TableScan @@ -205,7 +186,7 @@ STAGE PLANS: key expressions: _col2 (type: int) sort order: + Map-reduce partition columns: _col2 (type: int) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col3 (type: int) TableScan alias: a @@ -248,7 +229,7 @@ STAGE PLANS: 1 _col0 (type: int) 2 _col0 (type: int) outputColumnNames: _col3, _col7, _col8 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 17 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -256,7 +237,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-4 + Stage: Stage-3 Map Reduce Map Operator Tree: TableScan @@ -264,7 +245,7 @@ STAGE PLANS: key expressions: _col3 (type: int) sort order: + Map-reduce partition columns: _col3 (type: int) - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 17 Basic stats: COMPLETE Column stats: NONE value expressions: _col7 (type: int), _col8 (type: int) TableScan alias: pi @@ -289,14 +270,14 @@ STAGE PLANS: 0 _col3 (type: int) 1 _col0 (type: int) 
outputColumnNames: _col7, _col8 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 18 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col7 (type: int), _col8 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 18 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 18 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -308,8 +289,6 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select acct.ACC_N, acct.brn diff --git a/ql/src/test/results/clientpositive/join9.q.out b/ql/src/test/results/clientpositive/join9.q.out index 8421036..9af7e80 100644 --- a/ql/src/test/results/clientpositive/join9.q.out +++ b/ql/src/test/results/clientpositive/join9.q.out @@ -82,7 +82,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -90,8 +90,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -99,9 +99,10 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 0 + value expressions: _col1 (type: string) auto parallelism: false TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -109,8 +110,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -118,7 +119,6 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 1 - value expressions: _col1 (type: string) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -214,8 +214,8 @@ STAGE PLANS: name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /src [$hdt$_1:src2] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src1] + /src [$hdt$_0:src2] + /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:src1] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -224,10 +224,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 
1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out index 83785f2..b6d3d43 100644 --- a/ql/src/test/results/clientpositive/lineage2.q.out +++ b/ql/src/test/results/clientpositive/lineage2.q.out @@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"15","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]} 15 15 15 diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out index fb5e9df..2b2fa89 100644 --- a/ql/src/test/results/clientpositive/lineage3.q.out +++ b/ql/src/test/results/clientpositive/lineage3.q.out @@ -166,7 +166,7 @@ where key in (select key+18 from src1) order by key PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]} 146 val_146 273 val_273 PREHOOK: query: select * from src1 a @@ -178,15 +178,15 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src1 #### A masked pattern was here #### 
-{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 311 val_311 -Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select key, value from src1 where key not in (select key+18 from src1) order by key PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not 
in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]} PREHOOK: query: select * from src1 a where not exists (select cint from alltypesorc b @@ -196,7 +196,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 369 401 val_401 406 val_406 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out index 7f32108..c00e873 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out +++ 
b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out @@ -413,14 +413,99 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-0 is a root stage + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Partition Description: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: fact_daily + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: ((((value = 'val_484') or (value = 'val_238')) and ((key = '484') or (key = '238'))) and (((key = '484') and (value = 'val_484')) or ((key = '238') and (value = 'val_238')))) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: value=val_238 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 1 + hr 4 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.fact_daily + numFiles 3 + numRows 500 + partition_columns ds/hr + partition_columns.types string:string + rawDataSize 5312 + serialization.ddl struct fact_daily { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.fact_daily + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct fact_daily { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.fact_daily + name: default.fact_daily +#### A masked pattern was here #### Partition + base file name: value=val_484 input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -464,20 +549,15 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.fact_daily name: default.fact_daily + Truncated Path -> Alias: + /fact_daily/ds=1/hr=4/key=238/value=val_238 [$hdt$_0:fact_daily] + /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily] + + Stage: Stage-0 + Fetch Operator + limit: -1 Processor Tree: - TableScan - alias: fact_daily - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: (((key = '484') and (value = 'val_484')) or ((key = '238') and (value = 'val_238'))) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - ListSink + ListSink PREHOOK: query: -- List Bucketing Query SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out index d46b0ae..273234f 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out @@ -690,12 +690,10 @@ STAGE PLANS: predicate: (x = 484) (type: boolean) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 484 (type: int) - outputColumnNames: _col0 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col0 (type: int) + keys: 484 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/literal_decimal.q.out b/ql/src/test/results/clientpositive/literal_decimal.q.out index eddc1a4..0b6299b 100644 --- a/ql/src/test/results/clientpositive/literal_decimal.q.out +++ b/ql/src/test/results/clientpositive/literal_decimal.q.out @@ -14,12 +14,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: -1 (type: decimal(1,0)), 0 (type: decimal(1,0)), 1 (type: decimal(1,0)), 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 99999999999999999 (type: decimal(17,0)), 99999999999999999.9999999999999 (type: decimal(30,13)), null (type: decimal(1,0)) + expressions: -1 (type: int), 0 (type: int), 1 (type: int), 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 99999999999999999 (type: bigint), 99999999999999999.9999999999999 (type: decimal(30,13)), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 500 Data size: 392000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 784 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 
99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1 diff --git a/ql/src/test/results/clientpositive/mapjoin_subquery.q.out b/ql/src/test/results/clientpositive/mapjoin_subquery.q.out index 1f7a5f4..8a54caa 100644 --- a/ql/src/test/results/clientpositive/mapjoin_subquery.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_subquery.q.out @@ -96,10 +96,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -339,10 +339,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out index e4a9e5b..f3f3639 100644 --- a/ql/src/test/results/clientpositive/mergejoin.q.out +++ b/ql/src/test/results/clientpositive/mergejoin.q.out @@ -2659,7 +2659,6 @@ POSTHOOK: Input: default@tab_part POSTHOOK: Input: default@tab_part@ds=2008-04-08 #### A masked pattern was here #### 480 -Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@tab @@ -2679,7 +2678,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08 0 val_0 2008-04-08 NULL NULL NULL NULL NULL NULL 98 val_98 2008-04-08 NULL NULL NULL 98 val_98 2008-04-08 -Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@tab @@ -2694,8 +2692,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08 #### A masked pattern was here #### NULL NULL NULL 98 val_98 2008-04-08 NULL NULL NULL 98 val_98 2008-04-08 -Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join @@ -2714,7 +2710,6 @@ POSTHOOK: Input: default@tab POSTHOOK: Input: default@tab_part POSTHOOK: Input: default@tab_part@ds=2008-04-08 #### A masked pattern was here #### -Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join @@ -2739,7 +2734,6 @@ NULL NULL NULL 98 val_98 2008-04-08 98 val_98 2008-04-08 NULL NULL NULL 98 val_98 2008-04-08 98 val_98 2008-04-08 NULL NULL NULL 98 
val_98 2008-04-08 98 val_98 2008-04-08 NULL NULL NULL 98 val_98 2008-04-08 98 val_98 2008-04-08 -Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from (select * from tab where tab.key = 0)a @@ -3267,8 +3261,6 @@ NULL NULL NULL NULL NULL NULL 97 val_97 2008-04-08 NULL NULL NULL NULL NULL NULL 97 val_97 2008-04-08 NULL NULL NULL NULL NULL NULL 98 val_98 2008-04-08 NULL NULL NULL NULL NULL NULL 98 val_98 2008-04-08 -Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from (select * from tab where tab.key = 0)a join diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out index 4e23917..8a04aa7 100644 --- a/ql/src/test/results/clientpositive/pcr.q.out +++ b/ql/src/test/results/clientpositive/pcr.q.out @@ -1804,8 +1804,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -2010,8 +2010,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -2301,9 +2301,9 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] - /pcr_t1/ds=2000-04-10 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -2484,7 +2484,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2))) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2)))) (type: boolean) Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds (type: string) @@ -2590,8 +2590,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -3574,10 +3574,10 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] - /pcr_t1/ds=2000-04-10 [pcr_t1] - /pcr_t1/ds=2000-04-11 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-11 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -3893,9 +3893,9 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - 
/pcr_t1/ds=2000-04-09 [pcr_t1] - /pcr_t1/ds=2000-04-10 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -5579,8 +5579,8 @@ STAGE PLANS: name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [srcpart] - /srcpart/ds=2008-04-08/hr=12 [srcpart] + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart] + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart] Needs Tagging: false Reduce Operator Tree: Select Operator diff --git a/ql/src/test/results/clientpositive/pointlookup.q.out b/ql/src/test/results/clientpositive/pointlookup.q.out index a99b388..21bd22d 100644 --- a/ql/src/test/results/clientpositive/pointlookup.q.out +++ b/ql/src/test/results/clientpositive/pointlookup.q.out @@ -44,7 +44,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3'))) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3')))) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -110,7 +110,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3'))) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -176,7 +176,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (struct(key,value)) IN (const 
struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean) + predicate: ((((value = '8') or (value = '5') or (value = '6') or (value = '1') or (value = '3')) and ((key = '0') or (key = '1') or (key = '2') or (key = '3') or (key = '4') or (key = '5') or (key = '6') or (key = '7') or (key = '8') or (key = '9') or (key = '10'))) and (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3'))) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/pointlookup2.q.out b/ql/src/test/results/clientpositive/pointlookup2.q.out index b6270b3..ed3eb2b 100644 --- a/ql/src/test/results/clientpositive/pointlookup2.q.out +++ b/ql/src/test/results/clientpositive/pointlookup2.q.out @@ -68,7 +68,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_t1 POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 POSTHOOK: Output: default@pcr_t2 -POSTHOOK: Lineage: pcr_t2.ds SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: pcr_t2.ds SIMPLE [] POSTHOOK: Lineage: pcr_t2.key SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: from pcr_t1 @@ -83,8 +83,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_t1 POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 POSTHOOK: Output: default@pcr_t2 -POSTHOOK: Lineage: pcr_t2.ds SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: pcr_t2.key SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: pcr_t2.ds SIMPLE [] +POSTHOOK: Lineage: pcr_t2.key SIMPLE [] POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain extended select key, value, ds @@ -165,7 +165,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09'))) (type: boolean) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds (type: string) @@ -271,8 +271,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator diff --git a/ql/src/test/results/clientpositive/pointlookup3.q.out b/ql/src/test/results/clientpositive/pointlookup3.q.out index a37e079..e10d787 100644 --- a/ql/src/test/results/clientpositive/pointlookup3.q.out +++ b/ql/src/test/results/clientpositive/pointlookup3.q.out @@ -125,7 +125,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - 
predicate: (struct(ds1,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (struct(key,ds1)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09'))) (type: boolean) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds1 (type: string), ds2 (type: string) @@ -233,8 +233,8 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1] + /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1] + /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -365,7 +365,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (key = 1) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (key = 1)) (type: boolean) Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), ds1 (type: string) @@ -427,7 +427,7 @@ STAGE PLANS: name: default.pcr_t1 name: default.pcr_t1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] + /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1] Needs Tagging: false Reduce Operator Tree: Select Operator diff --git a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out index 29113e2..a819a79 100644 --- a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out +++ b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out @@ -354,10 +354,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col2 + outputColumnNames: _col0, _col3 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: int), _col2 (type: int) + expressions: _col0 (type: int), _col3 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_union_view.q.out b/ql/src/test/results/clientpositive/ppd_union_view.q.out index 5ec1200..ba51cbd 100644 --- a/ql/src/test/results/clientpositive/ppd_union_view.q.out +++ b/ql/src/test/results/clientpositive/ppd_union_view.q.out @@ -181,9 +181,9 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) + key expressions: _col0 (type: string), '2011-10-13' (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), '2011-10-13' (type: string) Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE tag: 0 value expressions: _col1 (type: string) @@ -201,9 +201,9 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) + key expressions: _col1 (type: string), '2011-10-13' (type: string) + sort order: ++ + Map-reduce partition columns: _col1 (type: string), '2011-10-13' (type: string) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE tag: 1 value 
expressions: _col0 (type: string) @@ -310,8 +310,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col0 (type: string) - 1 _col1 (type: string) + 0 _col0 (type: string), _col2 (type: string) + 1 _col1 (type: string), _col2 (type: string) outputColumnNames: _col1, _col3 Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -528,9 +528,9 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) + key expressions: _col0 (type: string), '2011-10-15' (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), '2011-10-15' (type: string) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE tag: 0 value expressions: _col1 (type: string) @@ -541,16 +541,16 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (('2011-10-15' = ds) and keymap is not null) (type: boolean) + predicate: ((ds = '2011-10-15') and keymap is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), keymap (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) + key expressions: _col1 (type: string), '2011-10-15' (type: string) + sort order: ++ + Map-reduce partition columns: _col1 (type: string), '2011-10-15' (type: string) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE tag: 1 value expressions: _col0 (type: string) @@ -561,8 +561,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col0 (type: string) - 1 _col1 (type: string) + 0 _col0 (type: string), _col2 (type: string) + 1 _col1 (type: string), _col2 (type: string) outputColumnNames: _col1, _col3 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator diff --git a/ql/src/test/results/clientpositive/quotedid_basic.q.out b/ql/src/test/results/clientpositive/quotedid_basic.q.out index 50c83a8..f9dc0e0 100644 --- a/ql/src/test/results/clientpositive/quotedid_basic.q.out +++ b/ql/src/test/results/clientpositive/quotedid_basic.q.out @@ -101,11 +101,11 @@ STAGE PLANS: predicate: (!@#$%^&*()_q = '1') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: x+1 (type: string), y&y (type: string), '1' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: x+1 (type: string), y&y (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: _col0 (type: string), _col1 (type: string), '1' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -156,11 +156,11 @@ STAGE PLANS: predicate: (!@#$%^&*()_q = '1') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: x+1 (type: string), y&y (type: string), '1' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: x+1 (type: 
string), y&y (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: _col0 (type: string), _col1 (type: string), '1' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -260,11 +260,11 @@ STAGE PLANS: predicate: (!@#$%^&*()_q = '1') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: x+1 (type: string), y&y (type: string), '1' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: x+1 (type: string), y&y (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: _col0 (type: string), _col1 (type: string), '1' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE diff --git a/ql/src/test/results/clientpositive/quotedid_partition.q.out b/ql/src/test/results/clientpositive/quotedid_partition.q.out index bc52c82..30cfce3 100644 --- a/ql/src/test/results/clientpositive/quotedid_partition.q.out +++ b/ql/src/test/results/clientpositive/quotedid_partition.q.out @@ -46,11 +46,11 @@ STAGE PLANS: predicate: (x+1 = '10') (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: '10' (type: string), y&y (type: string), 'a' (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: y&y (type: string) + outputColumnNames: _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string) + keys: '10' (type: string), _col1 (type: string), 'a' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out index 634e171..4db38a7 100644 --- a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out +++ b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out @@ -57,14 +57,53 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-0 is a root stage + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-0 - Fetch Operator - limit: -1 - Partition Description: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: ((rand(1) < 0.1) and ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0))) (type: boolean) + Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 18 Data 
size: 191 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### Partition + base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -108,20 +147,14 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.srcpart name: default.srcpart + Truncated Path -> Alias: + /srcpart/ds=2008-04-08/hr=12 [a] + + Stage: Stage-0 + Fetch Operator + limit: -1 Processor Tree: - TableScan - alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: false - predicate: ((rand(1) < 0.1) and ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0))) (type: boolean) - Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE - ListSink + ListSink PREHOOK: query: select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out index 4d3f81d..dccbe98 100644 --- a/ql/src/test/results/clientpositive/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out @@ -780,38 +780,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns 
_col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -1532,34 +1528,30 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out index 6106188..4b29056 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out @@ -238,17 +238,28 @@ STAGE PLANS: predicate: (key = 238) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - 
expressions: key (type: int), value (type: string) + expressions: 238 (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table2 + value expressions: _col1 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table2 Stage: Stage-0 Move Operator @@ -277,7 +288,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 POSTHOOK: Output: default@test_table2@ds=2 -POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [] POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select count(*) from test_table2 where ds = '2' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out index 441338e..05ebe9f 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out @@ -46,9 +46,6 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[22][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-4:MAPRED' is a cross product PREHOOK: query: explain select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY @@ -74,26 +71,30 @@ STAGE PLANS: Select Operator Statistics: Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: NONE TableScan alias: b Statistics: Num rows: 51 Data size: 206 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) 
Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -107,19 +108,23 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 5 (type: int), 5 (type: int) @@ -145,26 +150,30 @@ STAGE PLANS: Select Operator Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE TableScan alias: d Statistics: Num rows: 55 Data size: 222 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -179,11 +188,6 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[56][bigTable=?] in task 'Stage-7:MAPRED' is a cross product -Warning: Map Join MAPJOIN[48][bigTable=?] in task 'Stage-6:MAPRED' is a cross product -Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -Warning: Map Join MAPJOIN[57][bigTable=?] in task 'Stage-9:MAPRED' is a cross product -Warning: Map Join MAPJOIN[58][bigTable=?] 
in task 'Stage-10:MAPRED' is a cross product PREHOOK: query: -- explain -- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value @@ -226,14 +230,14 @@ STAGE PLANS: alias: b Statistics: Num rows: 51 Data size: 206 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Stage: Stage-9 Map Reduce @@ -250,8 +254,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -276,8 +280,8 @@ STAGE PLANS: TableScan HashTable Sink Operator keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Stage: Stage-6 Map Reduce @@ -287,8 +291,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 5 (type: int), 5 (type: int) @@ -315,8 +319,8 @@ STAGE PLANS: TableScan HashTable Sink Operator keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Stage: Stage-7 Map Reduce @@ -326,8 +330,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 5 (type: int), 5 (type: int) @@ -348,19 +352,23 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 5 (type: int), 5 (type: int) @@ -392,8 +400,8 @@ STAGE PLANS: Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Stage: Stage-10 Map Reduce @@ -402,7 +410,7 @@ STAGE PLANS: alias: d Statistics: Num rows: 55 Data size: 222 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE @@ -410,8 +418,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -428,11 +436,6 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[56][bigTable=?] in task 'Stage-7:MAPRED' is a cross product -Warning: Map Join MAPJOIN[48][bigTable=?] 
in task 'Stage-6:MAPRED' is a cross product -Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -Warning: Map Join MAPJOIN[57][bigTable=?] in task 'Stage-9:MAPRED' is a cross product -Warning: Map Join MAPJOIN[58][bigTable=?] in task 'Stage-10:MAPRED' is a cross product PREHOOK: query: select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY PREHOOK: Input: default@smb_bucket_1 diff --git a/ql/src/test/results/clientpositive/spark/auto_join19.q.out b/ql/src/test/results/clientpositive/spark/auto_join19.q.out index 9e4fb8f..42072b9 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join19.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join19.q.out @@ -68,12 +68,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col2 input vertices: 1 Map 2 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col2 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/auto_join9.q.out b/ql/src/test/results/clientpositive/spark/auto_join9.q.out index 568891b..5e0d548 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join9.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join9.q.out @@ -28,14 +28,14 @@ STAGE PLANS: Map 2 Map Operator Tree: TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: @@ -51,14 +51,14 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -66,12 +66,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col1, _col2 input vertices: 1 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git 
a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out index fa80956..1ab695b 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out @@ -1560,7 +1560,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) @@ -1681,7 +1681,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) diff --git a/ql/src/test/results/clientpositive/spark/insert1.q.out b/ql/src/test/results/clientpositive/spark/insert1.q.out index e72ba16..50e8376 100644 --- a/ql/src/test/results/clientpositive/spark/insert1.q.out +++ b/ql/src/test/results/clientpositive/spark/insert1.q.out @@ -26,7 +26,7 @@ POSTHOOK: query: insert overwrite table insert1 select a.key, a.value from inser POSTHOOK: type: QUERY POSTHOOK: Input: default@insert2 POSTHOOK: Output: default@insert1 -POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: insert1.key SIMPLE [] POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain insert into table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1) PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/join28.q.out b/ql/src/test/results/clientpositive/spark/join28.q.out index a8177f3..c2dc48e 100644 --- a/ql/src/test/results/clientpositive/spark/join28.q.out +++ b/ql/src/test/results/clientpositive/spark/join28.q.out @@ -114,12 +114,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join32.q.out b/ql/src/test/results/clientpositive/spark/join32.q.out index 1c1c103..8e157f2 100644 --- a/ql/src/test/results/clientpositive/spark/join32.q.out +++ b/ql/src/test/results/clientpositive/spark/join32.q.out @@ -196,7 +196,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink 
Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 Local Work: @@ -275,7 +275,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 input vertices: 1 Map 2 Position of Big Table: 0 @@ -284,15 +284,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 input vertices: 1 Map 3 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out index e2ae796..e4c9108 100644 --- a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out +++ b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out @@ -204,7 +204,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 Local Work: @@ -283,7 +283,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 input vertices: 1 Map 2 Position of Big Table: 0 @@ -292,15 +292,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 input vertices: 1 Map 3 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1420,13 +1420,13 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 input vertices: 0 Map 1 Position of Big Table: 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2289,12 +2289,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: 
_col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2533,12 +2533,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3, _col4 + outputColumnNames: _col0, _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join33.q.out b/ql/src/test/results/clientpositive/spark/join33.q.out index 1c1c103..8e157f2 100644 --- a/ql/src/test/results/clientpositive/spark/join33.q.out +++ b/ql/src/test/results/clientpositive/spark/join33.q.out @@ -196,7 +196,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) Position of Big Table: 0 Local Work: @@ -275,7 +275,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col1 input vertices: 1 Map 2 Position of Big Table: 0 @@ -284,15 +284,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 _col3 (type: string) + 0 _col1 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col3, _col6 + outputColumnNames: _col0, _col1, _col4 input vertices: 1 Map 3 Position of Big Table: 0 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string) + expressions: _col1 (type: string), _col0 (type: string), _col4 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out b/ql/src/test/results/clientpositive/spark/join9.q.out index c7440da..864dcf9 100644 --- a/ql/src/test/results/clientpositive/spark/join9.q.out +++ b/ql/src/test/results/clientpositive/spark/join9.q.out @@ -87,7 +87,7 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: src1 + alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -95,8 +95,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: _col0 + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -104,18 +104,16 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 0 + value expressions: _col1 (type: string) auto parallelism: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: #### A masked pattern was here #### Partition - base file name: hr=12 + base file name: src input format: org.apache.hadoop.mapred.TextInputFormat output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2008-04-08 - hr 12 properties: COLUMN_STATS_ACCURATE true bucket_count -1 @@ -123,13 +121,11 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.srcpart + name default.src numFiles 1 numRows 500 - partition_columns ds/hr - partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct srcpart { string key, string value} + serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -139,27 +135,30 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.srcpart - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct srcpart { string key, string value} + name default.src + numFiles 1 + numRows 500 + rawDataSize 5312 + serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcpart - name: default.srcpart + name: default.src + name: default.src Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=12 [src1] + /src [src2] Map 3 Map Operator Tree: TableScan - alias: src2 + alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -167,8 +166,8 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + expressions: key (type: string) + outputColumnNames: _col0 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) @@ -176,16 +175,18 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 1 - value expressions: _col1 (type: string) auto parallelism: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: #### A masked pattern was here #### Partition - base file name: src + base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 12 properties: COLUMN_STATS_ACCURATE true bucket_count -1 @@ -193,11 +194,13 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.src + name default.srcpart numFiles 1 numRows 500 + partition_columns ds/hr + partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct src { string key, string value} + serialization.ddl struct srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -207,26 +210,23 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat 
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} + name default.srcpart + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src + name: default.srcpart + name: default.srcpart Truncated Path -> Alias: - /src [src2] + /srcpart/ds=2008-04-08/hr=12 [src1] Reducer 2 Needs Tagging: true Reduce Operator Tree: @@ -236,10 +236,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col4 (type: string) + expressions: UDFToInteger(_col2) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out index d74b7d0..d1b2301 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out @@ -101,12 +101,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -351,12 +351,12 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col2 input vertices: 0 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col1 (type: string) + expressions: _col2 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out index 82593d4..ae40738 100644 --- a/ql/src/test/results/clientpositive/spark/pcr.q.out +++ b/ql/src/test/results/clientpositive/spark/pcr.q.out @@ -2543,7 +2543,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2))) (type: boolean) + predicate: (((key = 1) or (key = 2)) and (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2)))) (type: boolean) Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 
key (type: int), value (type: string), ds (type: string) diff --git a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out index 6e34865..173132f 100644 --- a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out @@ -804,38 +804,34 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean) Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -1580,34 +1576,30 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col3, _col4 + outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + File Output Operator + compressed: false + GlobalTableId: 0 #### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was 
here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out index 3076e06..bfdd529 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out @@ -233,6 +233,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-1 Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1) #### A masked pattern was here #### Vertices: Map 1 @@ -244,17 +246,29 @@ STAGE PLANS: predicate: (key = 238) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), value (type: string) + expressions: 238 (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table2 + value expressions: _col1 (type: string) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table2 Stage: Stage-0 Move Operator @@ -283,7 +297,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 POSTHOOK: Output: default@test_table2@ds=2 -POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [] 
POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select count(*) from test_table2 where ds = '2' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out index 30a09ec..afd1e5d 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out @@ -46,9 +46,6 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product -Warning: Shuffle Join JOIN[22][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 6' is a cross product -Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 3' is a cross product PREHOOK: query: explain select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY @@ -63,9 +60,9 @@ STAGE PLANS: Stage: Stage-1 Spark Edges: - Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1) - Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 1), Map 7 (PARTITION-LEVEL SORT, 1) - Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 1), Reducer 6 (PARTITION-LEVEL SORT, 1) + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2) #### A masked pattern was here #### Vertices: Map 1 @@ -79,7 +76,9 @@ STAGE PLANS: Select Operator Statistics: Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: @@ -87,12 +86,14 @@ STAGE PLANS: alias: b Statistics: Num rows: 51 Data size: 206 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Map 5 Map Operator Tree: @@ -105,7 +106,9 @@ STAGE PLANS: Select Operator Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Map 7 Map Operator Tree: @@ -113,12 +116,14 @@ STAGE PLANS: alias: d Statistics: Num rows: 55 Data size: 222 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) Statistics: Num rows: 27 Data size: 108 Basic stats: 
COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: @@ -126,11 +131,13 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: @@ -138,8 +145,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 5 (type: int), 5 (type: int) @@ -158,11 +165,13 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: int) + 1 _col0 (type: int) Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: 5 (type: int) + sort order: + + Map-reduce partition columns: 5 (type: int) Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE Stage: Stage-0 @@ -171,9 +180,6 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[37][bigTable=?] in task 'Stage-1:MAPRED' is a cross product -Warning: Map Join MAPJOIN[39][bigTable=?] in task 'Stage-1:MAPRED' is a cross product -Warning: Map Join MAPJOIN[38][bigTable=?] 
in task 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- explain -- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value @@ -207,14 +213,14 @@ STAGE PLANS: alias: b Statistics: Num rows: 51 Data size: 206 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Local Work: Map Reduce Local Work @@ -236,15 +242,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) input vertices: 1 Map 2 Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Local Work: Map Reduce Local Work Map 3 @@ -259,8 +265,8 @@ STAGE PLANS: Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) Local Work: Map Reduce Local Work @@ -274,7 +280,7 @@ STAGE PLANS: alias: d Statistics: Num rows: 55 Data size: 222 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (5 = key) (type: boolean) + predicate: (key = 5) (type: boolean) Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 27 Data size: 108 Basic stats: COMPLETE Column stats: NONE @@ -282,8 +288,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) input vertices: 0 Map 3 Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE @@ -291,8 +297,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 5 (type: int) + 1 5 (type: int) input vertices: 0 Map 1 Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE @@ -316,9 +322,6 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[37][bigTable=?] in task 'Stage-1:MAPRED' is a cross product -Warning: Map Join MAPJOIN[39][bigTable=?] in task 'Stage-1:MAPRED' is a cross product -Warning: Map Join MAPJOIN[38][bigTable=?] 
in task 'Stage-2:MAPRED' is a cross product PREHOOK: query: select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY PREHOOK: Input: default@smb_bucket_1 diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out index babaff8..17821a8 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out @@ -150,7 +150,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -160,7 +160,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key PREHOOK: type: QUERY @@ -335,7 +335,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -345,7 +345,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=4000 PREHOOK: type: QUERY @@ -355,7 +355,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_3 -POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ] +POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key full outer join smb_bucket4_3 c on a.key=c.key @@ -381,7 +381,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000 PREHOOK: type: QUERY @@ -391,7 +391,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -401,7 +401,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_3 -POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key full outer join smb_bucket4_3 c on a.key=c.key @@ -427,7 +427,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_1 -POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000 PREHOOK: type: QUERY @@ -437,7 +437,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_2 -POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: insert overwrite table smb_bucket4_3 select * from 
smb_bucket_input where key=5000 PREHOOK: type: QUERY @@ -447,7 +447,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_i POSTHOOK: type: QUERY POSTHOOK: Input: default@smb_bucket_input POSTHOOK: Output: default@smb_bucket4_3 -POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [] POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key full outer join smb_bucket4_3 c on a.key=c.key diff --git a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out index 1cae83a..a205cf6 100644 --- a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out +++ b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out @@ -413,7 +413,7 @@ PREHOOK: Input: default@t2 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key @@ -439,7 +439,7 @@ Operator:JOIN_9 Table:default@t1 Keys:key Table:default@t2 -Keys:key +Keys:val,key PREHOOK: query: -- no mapping on functions SELECT * @@ -474,7 +474,7 @@ PREHOOK: Input: default@t2 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key @@ -505,7 +505,7 @@ PREHOOK: Input: default@t3 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key @@ -518,7 +518,6 @@ Keys:val 13.0 1 17.0 1 46.0 1 -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 4' is a cross product PREHOOK: query: -- join followed by join SELECT * FROM @@ -544,7 +543,7 @@ PREHOOK: Input: default@t3 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key diff --git a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out index e2f5269..324e8b7 100644 --- a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out +++ b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out @@ -51,4 +51,4 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@testdate POSTHOOK: Output: default@testdate POSTHOOK: Lineage: testdate.dt EXPRESSION [(testdate)testdate.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: testdate.id EXPRESSION [(testdate)testdate.FieldSchema(name:id, type:int, comment:null), ] +POSTHOOK: Lineage: testdate.id EXPRESSION [] diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out index cef6138..374ccb2 100644 --- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out @@ -201,21 +201,21 @@ STAGE PLANS: alias: lineitem Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean) + predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean) Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column 
stats: NONE Select Operator - expressions: l_orderkey (type: int) - outputColumnNames: _col0 + expressions: l_orderkey (type: int), l_linenumber (type: int) + outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: int) + keys: _col0 (type: int), _col1 (type: int) mode: hash - outputColumnNames: _col0 + outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), 1 (type: int) + 1 _col0 (type: int), _col1 (type: int) Local Work: Map Reduce Local Work @@ -239,8 +239,8 @@ STAGE PLANS: condition map: Left Semi Join 0 to 1 keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), 1 (type: int) + 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col1, _col2 input vertices: 1 Map 2 diff --git a/ql/src/test/results/clientpositive/stats_empty_partition.q.out b/ql/src/test/results/clientpositive/stats_empty_partition.q.out index c13817e..202263e 100644 --- a/ql/src/test/results/clientpositive/stats_empty_partition.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_partition.q.out @@ -20,7 +20,7 @@ POSTHOOK: query: insert overwrite table tmptable partition (part = '1') select * POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@tmptable@part=1 -POSTHOOK: Lineage: tmptable PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable PARTITION(part=1).key SIMPLE [] POSTHOOK: Lineage: tmptable PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: describe formatted tmptable partition (part = '1') PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out index 2973e1f..7228175 100644 --- a/ql/src/test/results/clientpositive/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin.q.out @@ -1,4 +1,4 @@ -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, non corr explain select * @@ -151,7 +151,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from src where src.key not in ( select key from src s1 where s1.key > '2') @@ -285,7 +285,7 @@ POSTHOOK: Input: default@src 199 val_199 199 val_199 2 val_2 -Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, corr explain select p_mfgr, b.p_name, p_size @@ -530,7 +530,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in @@ -898,7 +898,7 @@ almond aquamarine 
sandy cyan gainsboro 18 almond aquamarine yellow dodger mint 7 almond azure aquamarine papaya violet 12 almond azure blanched chiffon midnight 23 -Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- agg, corr explain select p_mfgr, p_name, p_size @@ -1212,7 +1212,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) @@ -1288,7 +1288,7 @@ POSTHOOK: Input: default@lineitem 139636 1 175839 1 182052 1 -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- alternate not in syntax select * from src @@ -1452,7 +1452,7 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@t1_v POSTHOOK: Output: database:default POSTHOOK: Output: default@T2_v -Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain select * from T1_v where T1_v.key not in (select T2_v.key from T2_v) @@ -1597,7 +1597,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from T1_v where T1_v.key not in (select T2_v.key from T2_v) PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out index c08e2b9..4626f8a 100644 --- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out +++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out @@ -1,4 +1,4 @@ -Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- non agg, non corr -- JAVA_VERSION_SPECIFIC_OUTPUT @@ -188,7 +188,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- non agg, corr explain select b.p_mfgr, min(p_retailprice) @@ -445,7 +445,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out index c09d327..87aaed2 100644 --- 
a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out +++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out @@ -775,7 +775,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, corr explain select p_mfgr, b.p_name, p_size diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out index 470fa83..2afd7ff 100644 --- a/ql/src/test/results/clientpositive/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/subquery_views.q.out @@ -111,8 +111,8 @@ where `b`.`key` not in from `default`.`src` `a` where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11' ), tableType:VIRTUAL_VIEW) -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: explain select * from cv2 where cv2.key in (select key from cv2 c where c.key < '11') @@ -420,8 +420,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: select * from cv2 where cv2.key in (select key from cv2 c where c.key < '11') PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/table_access_keys_stats.q.out b/ql/src/test/results/clientpositive/table_access_keys_stats.q.out index 3d56fbd..a205cf6 100644 --- a/ql/src/test/results/clientpositive/table_access_keys_stats.q.out +++ b/ql/src/test/results/clientpositive/table_access_keys_stats.q.out @@ -413,7 +413,7 @@ PREHOOK: Input: default@t2 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key @@ -439,7 +439,7 @@ Operator:JOIN_9 Table:default@t1 Keys:key Table:default@t2 -Keys:key +Keys:val,key PREHOOK: query: -- no mapping on functions SELECT * @@ -474,7 +474,7 @@ PREHOOK: Input: default@t2 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key @@ -505,7 +505,7 @@ PREHOOK: Input: default@t3 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key @@ -518,7 +518,6 @@ Keys:val 13.0 1 17.0 1 46.0 1 -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-3:MAPRED' is a cross product PREHOOK: query: -- join followed by join SELECT * FROM @@ -544,7 +543,7 @@ PREHOOK: Input: default@t3 #### A masked pattern was here #### Operator:JOIN_9 Table:default@t1 -Keys:key +Keys:val,key Table:default@t2 Keys:key diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out 
b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out index 2447f19..90bc6c0 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out @@ -1482,7 +1482,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) @@ -1595,7 +1595,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out index d95fe2a..77a84fb 100644 --- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out @@ -1870,7 +1870,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 #### A masked pattern was here #### 1000 -Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: -- parent is reduce tasks EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY @@ -1894,42 +1893,42 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: (ds = '2008-04-08') (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: 
Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Merge Join Operator condition map: Inner Join 0 to 1 keys: - 0 - 1 - Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + 0 _col0 (type: string) + 1 '2008-04-08' (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -1963,8 +1962,25 @@ STAGE PLANS: Select Operator Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + key expressions: '2008-04-08' (type: string) + sort order: + + Map-reduce partition columns: '2008-04-08' (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '2008-04-08' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Target column: ds + Target Vertex: Map 1 Stage: Stage-0 Fetch Operator @@ -1972,18 +1988,21 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 1000 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' @@ -4322,7 +4341,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 #### A masked pattern was here #### 1000 -Warning: Map Join MAPJOIN[21][bigTable=?] 
in task 'Reducer 3' is a cross product PREHOOK: query: -- parent is reduce tasks EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY @@ -4345,33 +4363,33 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: (ds = '2008-04-08') (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 2 Map Operator Tree: TableScan alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator @@ -4385,11 +4403,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 _col0 (type: string) + 1 '2008-04-08' (type: string) input vertices: 0 Map 1 - Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + HybridGraceHashJoin: true Group By Operator aggregations: count() mode: hash @@ -4420,18 +4439,21 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[21][bigTable=?] 
in task 'Reducer 3' is a cross product PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 1000 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out index a3d1f87..135368f 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out @@ -2588,11 +2588,11 @@ Stage-0 | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ - | Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + | Statistics:Num rows: 5 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col1 (type: int) | Select Operator [SEL_2] | outputColumnNames:["_col0","_col1"] - | Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + | Statistics:Num rows: 5 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE | Filter Operator [FIL_16] | predicate:((((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0))) and key is not null) (type: boolean) | Statistics:Num rows: 5 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE @@ -2915,7 +2915,7 @@ Stage-0 limit:-1 Select Operator [SEL_2] outputColumnNames:["_col0"] - Filter Operator [FIL_4] + Filter Operator [FIL_6] predicate:((c_int = -6) or (c_int = 6)) (type: boolean) TableScan [TS_0] alias:cbo_t1 @@ -3185,17 +3185,17 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator [FS_16] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_21] + Merge Join Operator [MERGEJOIN_19] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_9] | key expressions:_col1 (type: string), _col0 (type: string) | Map-reduce partition columns:_col1 (type: string), _col0 (type: string) | sort order:++ @@ -3257,17 +3257,17 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator [FS_16] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 2 
Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_21] + Merge Join Operator [MERGEJOIN_19] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_9] | key expressions:_col1 (type: string), _col0 (type: string) | Map-reduce partition columns:_col1 (type: string), _col0 (type: string) | sort order:++ @@ -3319,17 +3319,17 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator [FS_16] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_21] + Merge Join Operator [MERGEJOIN_19] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"} | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_9] | key expressions:_col0 (type: string) | Map-reduce partition columns:_col0 (type: string) | sort order:+ @@ -3497,49 +3497,49 @@ Stage-0 limit:-1 Stage-1 Reducer 4 - File Output Operator [FS_37] + File Output Operator [FS_33] compressed:false Statistics:Num rows: 34 Data size: 6324 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Merge Join Operator [MERGEJOIN_50] + Merge Join Operator [MERGEJOIN_46] | condition map:[{"":"Left Semi Join 0 to 1"}] | keys:{"0":"_col2 (type: bigint)","1":"_col0 (type: bigint)"} | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 34 Data size: 6324 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 3 [SIMPLE_EDGE] - | Reduce Output Operator [RS_32] + | Reduce Output Operator [RS_28] | key expressions:_col2 (type: bigint) | Map-reduce partition columns:_col2 (type: bigint) | sort order:+ | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string), _col1 (type: string) - | Filter Operator [FIL_43] + | Filter Operator [FIL_39] | predicate:_col2 is not null (type: boolean) | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_20] + | Group By Operator [GBY_16] | | aggregations:["count(VALUE._col0)"] | | keys:KEY._col0 (type: string), KEY._col1 (type: string) | | outputColumnNames:["_col0","_col1","_col2"] | | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE | |<-Reducer 2 [SIMPLE_EDGE] - | Reduce Output Operator [RS_19] + | Reduce Output Operator [RS_15] | key expressions:_col0 (type: string), _col1 (type: string) 
| Map-reduce partition columns:_col0 (type: string), _col1 (type: string) | sort order:++ | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col2 (type: bigint) - | Group By Operator [GBY_18] + | Group By Operator [GBY_14] | aggregations:["count()"] | keys:_col0 (type: string), _col1 (type: string) | outputColumnNames:["_col0","_col1","_col2"] | Statistics:Num rows: 83 Data size: 15438 Basic stats: COMPLETE Column stats: COMPLETE - | Merge Join Operator [MERGEJOIN_49] + | Merge Join Operator [MERGEJOIN_45] | | condition map:[{"":"Left Semi Join 0 to 1"}] | | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"} | | outputColumnNames:["_col0","_col1"] | | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 1 [SIMPLE_EDGE] - | | Reduce Output Operator [RS_13] + | | Reduce Output Operator [RS_9] | | key expressions:_col0 (type: string) | | Map-reduce partition columns:_col0 (type: string) | | sort order:+ diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out index 8004641..e54dd0a 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out @@ -198,8 +198,8 @@ Stage-0 Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Merge Join Operator [MERGEJOIN_28] | condition map:[{"":"Inner Join 0 to 1"}] - | keys:{"0":"_col3 (type: string)","1":"_col0 (type: string)"} - | outputColumnNames:["_col0","_col3","_col6"] + | keys:{"0":"_col1 (type: string)","1":"_col0 (type: string)"} + | outputColumnNames:["_col0","_col1","_col4"] | Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE |<-Map 5 [SIMPLE_EDGE] | Reduce Output Operator [RS_15] @@ -219,15 +219,15 @@ Stage-0 | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE |<-Reducer 2 [SIMPLE_EDGE] Reduce Output Operator [RS_13] - key expressions:_col3 (type: string) - Map-reduce partition columns:_col3 (type: string) + key expressions:_col1 (type: string) + Map-reduce partition columns:_col1 (type: string) sort order:+ Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE value expressions:_col0 (type: string) Merge Join Operator [MERGEJOIN_27] | condition map:[{"":"Inner Join 0 to 1"}] | keys:{"0":"_col0 (type: string)","1":"_col1 (type: string)"} - | outputColumnNames:["_col0","_col3"] + | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE |<-Map 1 [SIMPLE_EDGE] | Reduce Output Operator [RS_8] @@ -367,53 +367,53 @@ Stage-0 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint) Group By Operator [GBY_61] - aggregations:["count(_col13)","count(_col21)","count(_col3)"] - keys:_col2 (type: string), _col12 (type: string), _col20 (type: string) + aggregations:["count(_col9)","count(_col15)","count(_col3)"] + keys:_col2 (type: string), _col8 (type: string), _col14 (type: string) outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"] Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Select Operator [SEL_60] - outputColumnNames:["_col2","_col12","_col20","_col13","_col21","_col3"] + outputColumnNames:["_col2","_col8","_col14","_col9","_col15","_col3"] Statistics:Num 
rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Merge Join Operator [MERGEJOIN_110] | condition map:[{"":"Inner Join 0 to 1"}] - | keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col15 (type: string), _col17 (type: string)"} - | outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"] + | keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col10 (type: string), _col12 (type: string)"} + | outputColumnNames:["_col2","_col3","_col8","_col9","_col14","_col15"] | Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE |<-Reducer 11 [SIMPLE_EDGE] | Reduce Output Operator [RS_58] - | key expressions:_col15 (type: string), _col17 (type: string) - | Map-reduce partition columns:_col15 (type: string), _col17 (type: string) + | key expressions:_col10 (type: string), _col12 (type: string) + | Map-reduce partition columns:_col10 (type: string), _col12 (type: string) | sort order:++ | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE - | value expressions:_col6 (type: string), _col7 (type: string), _col14 (type: string) + | value expressions:_col3 (type: string), _col4 (type: string), _col9 (type: string) | Select Operator [SEL_49] - | outputColumnNames:["_col14","_col15","_col17","_col6","_col7"] + | outputColumnNames:["_col10","_col12","_col3","_col4","_col9"] | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE | Merge Join Operator [MERGEJOIN_109] | | condition map:[{"":"Inner Join 0 to 1"}] - | | keys:{"0":"_col4 (type: string), _col6 (type: string)","1":"_col2 (type: string), _col4 (type: string)"} - | | outputColumnNames:["_col2","_col3","_col14","_col15","_col17"] + | | keys:{"0":"_col3 (type: string), _col5 (type: string)","1":"_col1 (type: string), _col3 (type: string)"} + | | outputColumnNames:["_col1","_col2","_col9","_col10","_col12"] | | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE | |<-Reducer 10 [SIMPLE_EDGE] | | Reduce Output Operator [RS_45] - | | key expressions:_col4 (type: string), _col6 (type: string) - | | Map-reduce partition columns:_col4 (type: string), _col6 (type: string) + | | key expressions:_col3 (type: string), _col5 (type: string) + | | Map-reduce partition columns:_col3 (type: string), _col5 (type: string) | | sort order:++ | | Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col2 (type: string), _col3 (type: string) + | | value expressions:_col1 (type: string), _col2 (type: string) | | Merge Join Operator [MERGEJOIN_107] | | | condition map:[{"":"Inner Join 0 to 1"}] - | | | keys:{"0":"_col3 (type: string)","1":"_col1 (type: string)"} - | | | outputColumnNames:["_col2","_col3","_col4","_col6"] + | | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"} + | | | outputColumnNames:["_col1","_col2","_col3","_col5"] | | | Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE | | |<-Map 14 [SIMPLE_EDGE] | | | Reduce Output Operator [RS_42] - | | | key expressions:_col1 (type: string) - | | | Map-reduce partition columns:_col1 (type: string) + | | | key expressions:_col0 (type: string) + | | | Map-reduce partition columns:_col0 (type: string) | | | sort order:+ | | | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE | | | Select Operator [SEL_16] - | | | outputColumnNames:["_col1"] + | | | outputColumnNames:["_col0"] | | | Statistics:Num rows: 6 Data size: 45 Basic stats: 
COMPLETE Column stats: NONE | | | Filter Operator [FIL_101] | | | predicate:((key = 'src1key') and value is not null) (type: boolean) @@ -423,15 +423,15 @@ Stage-0 | | | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE | | |<-Reducer 9 [SIMPLE_EDGE] | | Reduce Output Operator [RS_40] - | | key expressions:_col3 (type: string) - | | Map-reduce partition columns:_col3 (type: string) + | | key expressions:_col2 (type: string) + | | Map-reduce partition columns:_col2 (type: string) | | sort order:+ | | Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col2 (type: string), _col4 (type: string), _col6 (type: string) + | | value expressions:_col1 (type: string), _col3 (type: string), _col5 (type: string) | | Merge Join Operator [MERGEJOIN_106] | | | condition map:[{"":"Inner Join 0 to 1"}] - | | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"} - | | | outputColumnNames:["_col2","_col3","_col4","_col6"] + | | | keys:{"0":"_col1 (type: string)","1":"_col0 (type: string)"} + | | | outputColumnNames:["_col1","_col2","_col3","_col5"] | | | Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE | | |<-Map 13 [SIMPLE_EDGE] | | | Reduce Output Operator [RS_37] @@ -450,15 +450,15 @@ Stage-0 | | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE | | |<-Reducer 8 [SIMPLE_EDGE] | | Reduce Output Operator [RS_35] - | | key expressions:_col2 (type: string) - | | Map-reduce partition columns:_col2 (type: string) + | | key expressions:_col1 (type: string) + | | Map-reduce partition columns:_col1 (type: string) | | sort order:+ | | Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string) + | | value expressions:_col2 (type: string), _col3 (type: string), _col5 (type: string) | | Merge Join Operator [MERGEJOIN_105] | | | condition map:[{"":"Inner Join 0 to 1"}] - | | | keys:{"0":"_col1 (type: string)","1":"_col3 (type: string)"} - | | | outputColumnNames:["_col2","_col3","_col4","_col6"] + | | | keys:{"0":"_col0 (type: string)","1":"_col3 (type: string)"} + | | | outputColumnNames:["_col1","_col2","_col3","_col5"] | | | Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE | | |<-Map 12 [SIMPLE_EDGE] | | | Reduce Output Operator [RS_32] @@ -478,12 +478,12 @@ Stage-0 | | | Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE | | |<-Map 7 [SIMPLE_EDGE] | | Reduce Output Operator [RS_30] - | | key expressions:_col1 (type: string) - | | Map-reduce partition columns:_col1 (type: string) + | | key expressions:_col0 (type: string) + | | Map-reduce partition columns:_col0 (type: string) | | sort order:+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE | | Select Operator [SEL_7] - | | outputColumnNames:["_col1"] + | | outputColumnNames:["_col0"] | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE | | Filter Operator [FIL_98] | | predicate:((key = 'srcpartkey') and value is not null) (type: boolean) @@ -493,15 +493,15 @@ Stage-0 | | Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE | |<-Reducer 16 [SIMPLE_EDGE] | Reduce Output Operator [RS_47] - | key expressions:_col2 (type: string), _col4 (type: string) - | Map-reduce partition columns:_col2 (type: string), _col4 (type: string) + | key 
expressions:_col1 (type: string), _col3 (type: string) + | Map-reduce partition columns:_col1 (type: string), _col3 (type: string) | sort order:++ | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE - | value expressions:_col3 (type: string), _col5 (type: string) + | value expressions:_col2 (type: string), _col4 (type: string) | Merge Join Operator [MERGEJOIN_108] | | condition map:[{"":"Inner Join 0 to 1"}] | | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"} - | | outputColumnNames:["_col2","_col3","_col4","_col5"] + | | outputColumnNames:["_col1","_col2","_col3","_col4"] | | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE | |<-Map 15 [SIMPLE_EDGE] | | Reduce Output Operator [RS_24] @@ -509,9 +509,9 @@ Stage-0 | | Map-reduce partition columns:_col0 (type: string) | | sort order:+ | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string) + | | value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string) | | Select Operator [SEL_19] - | | outputColumnNames:["_col0","_col2","_col3","_col4","_col5"] + | | outputColumnNames:["_col0","_col1","_col2","_col3","_col4"] | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE | | Filter Operator [FIL_102] | | predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean) @@ -1406,8 +1406,8 @@ Stage-0 Map Join Operator [MAPJOIN_28] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 1":"_col3 (type: string)","Map 3":"_col0 (type: string)"} - | outputColumnNames:["_col0","_col3","_col6"] + | keys:{"Map 1":"_col1 (type: string)","Map 3":"_col0 (type: string)"} + | outputColumnNames:["_col0","_col1","_col4"] | Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE |<-Map 3 [BROADCAST_EDGE] | Reduce Output Operator [RS_15] @@ -1429,7 +1429,7 @@ Stage-0 | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true | keys:{"Map 1":"_col0 (type: string)","Map 2":"_col1 (type: string)"} - | outputColumnNames:["_col0","_col3"] + | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE |<-Map 2 [BROADCAST_EDGE] | Reduce Output Operator [RS_10] @@ -1559,18 +1559,18 @@ Stage-0 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint) Group By Operator [GBY_61] - aggregations:["count(_col13)","count(_col21)","count(_col3)"] - keys:_col2 (type: string), _col12 (type: string), _col20 (type: string) + aggregations:["count(_col9)","count(_col15)","count(_col3)"] + keys:_col2 (type: string), _col8 (type: string), _col14 (type: string) outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"] Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Select Operator [SEL_60] - outputColumnNames:["_col2","_col12","_col20","_col13","_col21","_col3"] + outputColumnNames:["_col2","_col8","_col14","_col9","_col15","_col3"] Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE Map Join Operator [MAPJOIN_110] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 2":"_col1 (type: string), 
_col3 (type: string)","Map 3":"_col15 (type: string), _col17 (type: string)"} - | outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"] + | keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 3":"_col10 (type: string), _col12 (type: string)"} + | outputColumnNames:["_col2","_col3","_col8","_col9","_col14","_col15"] | Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE |<-Map 2 [BROADCAST_EDGE] | Reduce Output Operator [RS_56] @@ -1611,26 +1611,26 @@ Stage-0 | alias:d1 | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE |<-Select Operator [SEL_49] - outputColumnNames:["_col14","_col15","_col17","_col6","_col7"] + outputColumnNames:["_col10","_col12","_col3","_col4","_col9"] Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE Map Join Operator [MAPJOIN_109] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 3":"_col4 (type: string), _col6 (type: string)","Map 10":"_col2 (type: string), _col4 (type: string)"} - | outputColumnNames:["_col2","_col3","_col14","_col15","_col17"] + | keys:{"Map 3":"_col3 (type: string), _col5 (type: string)","Map 10":"_col1 (type: string), _col3 (type: string)"} + | outputColumnNames:["_col1","_col2","_col9","_col10","_col12"] | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE |<-Map 10 [BROADCAST_EDGE] | Reduce Output Operator [RS_47] - | key expressions:_col2 (type: string), _col4 (type: string) - | Map-reduce partition columns:_col2 (type: string), _col4 (type: string) + | key expressions:_col1 (type: string), _col3 (type: string) + | Map-reduce partition columns:_col1 (type: string), _col3 (type: string) | sort order:++ | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE - | value expressions:_col3 (type: string), _col5 (type: string) + | value expressions:_col2 (type: string), _col4 (type: string) | Map Join Operator [MAPJOIN_108] | | condition map:[{"":"Inner Join 0 to 1"}] | | HybridGraceHashJoin:true | | keys:{"Map 9":"_col0 (type: string)","Map 10":"_col0 (type: string)"} - | | outputColumnNames:["_col2","_col3","_col4","_col5"] + | | outputColumnNames:["_col1","_col2","_col3","_col4"] | | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE | |<-Map 9 [BROADCAST_EDGE] | | Reduce Output Operator [RS_24] @@ -1638,9 +1638,9 @@ Stage-0 | | Map-reduce partition columns:_col0 (type: string) | | sort order:+ | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE - | | value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string) + | | value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string) | | Select Operator [SEL_19] - | | outputColumnNames:["_col0","_col2","_col3","_col4","_col5"] + | | outputColumnNames:["_col0","_col1","_col2","_col3","_col4"] | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE | | Filter Operator [FIL_102] | | predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean) @@ -1660,17 +1660,17 @@ Stage-0 |<-Map Join Operator [MAPJOIN_107] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 3":"_col3 (type: string)","Map 8":"_col1 (type: string)"} - | outputColumnNames:["_col2","_col3","_col4","_col6"] + | keys:{"Map 3":"_col2 (type: 
string)","Map 8":"_col0 (type: string)"} + | outputColumnNames:["_col1","_col2","_col3","_col5"] | Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE |<-Map 8 [BROADCAST_EDGE] | Reduce Output Operator [RS_42] - | key expressions:_col1 (type: string) - | Map-reduce partition columns:_col1 (type: string) + | key expressions:_col0 (type: string) + | Map-reduce partition columns:_col0 (type: string) | sort order:+ | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE | Select Operator [SEL_16] - | outputColumnNames:["_col1"] + | outputColumnNames:["_col0"] | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE | Filter Operator [FIL_101] | predicate:((key = 'src1key') and value is not null) (type: boolean) @@ -1681,8 +1681,8 @@ Stage-0 |<-Map Join Operator [MAPJOIN_106] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 3":"_col2 (type: string)","Map 7":"_col0 (type: string)"} - | outputColumnNames:["_col2","_col3","_col4","_col6"] + | keys:{"Map 3":"_col1 (type: string)","Map 7":"_col0 (type: string)"} + | outputColumnNames:["_col1","_col2","_col3","_col5"] | Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE |<-Map 7 [BROADCAST_EDGE] | Reduce Output Operator [RS_37] @@ -1702,8 +1702,8 @@ Stage-0 |<-Map Join Operator [MAPJOIN_105] | condition map:[{"":"Inner Join 0 to 1"}] | HybridGraceHashJoin:true - | keys:{"Map 3":"_col1 (type: string)","Map 6":"_col3 (type: string)"} - | outputColumnNames:["_col2","_col3","_col4","_col6"] + | keys:{"Map 3":"_col0 (type: string)","Map 6":"_col3 (type: string)"} + | outputColumnNames:["_col1","_col2","_col3","_col5"] | Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE |<-Map 6 [BROADCAST_EDGE] | Reduce Output Operator [RS_32] @@ -1722,7 +1722,7 @@ Stage-0 | alias:ss | Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE |<-Select Operator [SEL_7] - outputColumnNames:["_col1"] + outputColumnNames:["_col0"] Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator [FIL_98] predicate:((key = 'srcpartkey') and value is not null) (type: boolean) diff --git a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out index 63c813d..49d7bcf 100644 --- a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out +++ b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out @@ -1234,7 +1234,6 @@ POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default POSTHOOK: Output: default@decimal_mapjoin -Warning: Map Join MAPJOIN[15][bigTable=?] 
diff --git a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
index 63c813d..49d7bcf 100644
--- a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
@@ -1234,7 +1234,6 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_mapjoin
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1272,8 +1271,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0
-                  1
+                  0 6981 (type: int)
+                  1 6981 (type: int)
                 outputColumnNames: _col0, _col2
                 input vertices:
                   1 Map 2
@@ -1296,14 +1295,16 @@ STAGE PLANS:
                   alias: l
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (6981 = cint) (type: boolean)
+                    predicate: (cint = 6981) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal2 (type: decimal(23,14))
                       outputColumnNames: _col0
                       Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        sort order:
+                        key expressions: 6981 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: 6981 (type: int)
                         Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: decimal(23,14))
             Execution mode: vectorized
@@ -1314,7 +1315,6 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1429,7 +1429,6 @@ POSTHOOK: Input: default@decimal_mapjoin
 6981	6981	-515.6210729730	NULL
 6981	6981	-515.6210729730	NULL
 6981	6981	-515.6210729730	NULL
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1467,12 +1466,13 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0
-                  1
+                  0 6981 (type: int)
+                  1 6981 (type: int)
                 outputColumnNames: _col0, _col2
                 input vertices:
                   1 Map 2
                 Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE
+                HybridGraceHashJoin: true
                 Select Operator
                   expressions: 6981 (type: int), 6981 (type: int), _col0 (type: decimal(20,10)), _col2 (type: decimal(23,14))
                   outputColumnNames: _col0, _col1, _col2, _col3
@@ -1491,14 +1491,16 @@ STAGE PLANS:
                   alias: l
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (6981 = cint) (type: boolean)
+                    predicate: (cint = 6981) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal2 (type: decimal(23,14))
                       outputColumnNames: _col0
                       Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        sort order:
+                        key expressions: 6981 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: 6981 (type: int)
                         Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: decimal(23,14))
             Execution mode: vectorized
@@ -1509,7 +1511,6 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l JOIN decimal_mapjoin r ON l.cint = r.cint
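In the hybridgrace_hashjoin_1.q.out hunks above, the map join's key lists were previously empty, hence the removed cross-product warnings: with cint filtered to the single value 6981 on both sides of l.cint = r.cint, the old planner had reduced the equality keys away entirely. After folding, both key lists carry the literal 6981 and the filter is normalized from (6981 = cint) to (cint = 6981). A toy sketch of the propagation step, on hypothetical string-based expressions (Hive itself works on ExprNodeDesc and RexNode trees, not strings):

    public class ConstantKeySketch {
      /**
       * Given a join condition leftCol = rightCol and a filter filterCol = literal,
       * constant propagation lets both join keys be replaced by the literal.
       */
      static String foldJoinKey(String leftCol, String rightCol,
                                String filterCol, String literal) {
        if (filterCol.equals(leftCol) || filterCol.equals(rightCol)) {
          return literal;   // both sides now produce the same constant key
        }
        return null;        // nothing to fold
      }

      public static void main(String[] args) {
        // Mirrors the plan above: l.cint = r.cint plus cint = 6981.
        System.out.println(foldJoinKey("l.cint", "r.cint", "l.cint", "6981")); // 6981
      }
    }
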
diff --git a/ql/src/test/results/clientpositive/tez/mergejoin.q.out b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
index 34e4da3..e23f40a 100644
--- a/ql/src/test/results/clientpositive/tez/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
@@ -2632,7 +2632,6 @@ POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 480
-Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab
@@ -2652,7 +2651,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 0	val_0	2008-04-08	NULL	NULL	NULL
 NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08
-Warning: Shuffle Join MERGEJOIN[15][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab
@@ -2667,8 +2665,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08
-Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
-Warning: Shuffle Join MERGEJOIN[25][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 full outer join
@@ -2687,7 +2683,6 @@ POSTHOOK: Input: default@tab
 POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
-Warning: Shuffle Join MERGEJOIN[25][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 full outer join
@@ -2713,7 +2708,6 @@ NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 Warning: Shuffle Join MERGEJOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
-Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 join
@@ -3240,8 +3234,6 @@ NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
-Warning: Shuffle Join MERGEJOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
-Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 join
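The mergejoin.q.out hunks above only delete "cross product" warnings. The likely mechanism: those warnings fire when a shuffle join ends up with no equality keys, and once constant folding supplies literal keys (as in the previous file), the condition no longer triggers. A hedged sketch of just the triggering condition; Hive's actual check lives in its CrossProductCheck physical-plan hook and is more involved:

    import java.util.List;

    public class CrossProductCheckSketch {
      /** A join degenerates to a cross product when neither side has equality keys. */
      static boolean isCrossProduct(List<String> leftKeys, List<String> rightKeys) {
        return leftKeys.isEmpty() && rightKeys.isEmpty();
      }

      public static void main(String[] args) {
        System.out.println(isCrossProduct(List.of(), List.of()));             // true -> warn
        System.out.println(isCrossProduct(List.of("6981"), List.of("6981"))); // false
      }
    }
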
diff --git a/ql/src/test/results/clientpositive/tez/tez_self_join.q.out b/ql/src/test/results/clientpositive/tez/tez_self_join.q.out
index 0064c98..abe6c64 100644
--- a/ql/src/test/results/clientpositive/tez/tez_self_join.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_self_join.q.out
@@ -42,7 +42,6 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@tez_self_join2
 POSTHOOK: Lineage: tez_self_join2.id1 EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-Warning: Shuffle Join MERGEJOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 select s.id2, s.id3
 from
@@ -90,7 +89,9 @@ STAGE PLANS:
                     outputColumnNames: _col0, _col2
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      sort order:
+                      key expressions: 'ab' (type: string)
+                      sort order: +
+                      Map-reduce partition columns: 'ab' (type: string)
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: int), _col2 (type: string)
         Map 4
            Map Operator Tree:
                TableScan
                  alias: self1
                  Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
-                   predicate: ('ab' = id3) (type: boolean)
+                   predicate: (id3 = 'ab') (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        sort order:
+                        key expressions: 'ab' (type: string)
+                        sort order: +
+                        Map-reduce partition columns: 'ab' (type: string)
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Map 5
            Map Operator Tree:
@@ -129,8 +132,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0
-                  1
+                  0 'ab' (type: string)
+                  1 'ab' (type: string)
                 outputColumnNames: _col0, _col2
                 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
@@ -167,7 +170,6 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select s.id2, s.id3
 from
 (
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out
index 9a5d047..5bc04d7 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out
@@ -118,7 +118,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -268,7 +268,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
@@ -419,7 +419,7 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                      key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
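The vector_decimal_round hunks above show a cosmetic-looking change with a real cause: round(_col0, (- 1)) becomes round(_col0, -1) because unary minus applied to a literal is itself deterministic, so it folds to a plain literal before the plan is printed. A minimal sketch of that single folding step, on illustrative types rather than Hive's expression classes:

    public class NegLiteralFoldSketch {
      interface Expr {}
      record Lit(int value) implements Expr {}
      record Neg(Expr child) implements Expr {}

      /** Folds -(literal) into a literal; leaves anything else untouched. */
      static Expr fold(Expr e) {
        if (e instanceof Neg neg && fold(neg.child()) instanceof Lit lit) {
          return new Lit(-lit.value());
        }
        return e;
      }

      public static void main(String[] args) {
        System.out.println(fold(new Neg(new Lit(1)))); // Lit[value=-1]
      }
    }
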
diff --git a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
index 5e5d38e..895dad6 100644
--- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
@@ -205,8 +205,8 @@ STAGE PLANS:
                 condition map:
                      Left Semi Join 0 to 1
                 keys:
-                  0 _col0 (type: int)
-                  1 _col0 (type: int)
+                  0 _col0 (type: int), 1 (type: int)
+                  1 _col0 (type: int), _col1 (type: int)
                 outputColumnNames: _col1, _col2
                 input vertices:
                   1 Map 2
@@ -224,21 +224,21 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean)
+                    predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean)
                     Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: l_orderkey (type: int)
-                      outputColumnNames: _col0
+                      expressions: l_orderkey (type: int), l_linenumber (type: int)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int)
+                        keys: _col0 (type: int), _col1 (type: int)
                         mode: hash
-                        outputColumnNames: _col0
+                        outputColumnNames: _col0, _col1
                         Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
+                          key expressions: _col0 (type: int), _col1 (type: int)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                           Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
         Map 3
            Map Operator Tree:
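In vector_mapjoin_reduce.q.out above, folding reshapes the left semi join: the constant conjunct l_linenumber = 1 moves out of the residual filter and into the join keys (the probe side supplies the literal 1, the build side keeps _col1), leaving only null-rejection (l_linenumber is not null) in the filter. A rough sketch of that extraction, on hypothetical string predicates rather than Hive's ExprNodeDesc trees:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class SemiJoinKeySketch {
      public static void main(String[] args) {
        // Conjuncts of the form column = literal, as found in the original filter.
        Map<String, String> constEqualities = new LinkedHashMap<>();
        constEqualities.put("l_linenumber", "1");

        List<String> joinKeys = new ArrayList<>(List.of("l_orderkey"));
        List<String> residual = new ArrayList<>(List.of("l_shipmode = 'AIR'"));

        for (Map.Entry<String, String> eq : constEqualities.entrySet()) {
          joinKeys.add(eq.getValue());                // the key becomes the literal
          residual.add(eq.getKey() + " is not null"); // only null-rejection remains
        }
        System.out.println("keys: " + joinKeys);      // [l_orderkey, 1]
        System.out.println("residual: " + residual);  // [l_shipmode = 'AIR', l_linenumber is not null]
      }
    }
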
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index 9451b98..00a9448 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -1899,7 +1899,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -1923,42 +1922,42 @@ STAGE PLANS:
         Map Operator Tree:
             TableScan
               alias: srcpart
-              filterExpr: (ds = '2008-04-08') (type: boolean)
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              filterExpr: ds is not null (type: boolean)
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                expressions: ds (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  sort order:
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
            Map Operator Tree:
                TableScan
                  alias: srcpart
                  filterExpr: (ds = '2008-04-08') (type: boolean)
                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                 Select Operator
-                   expressions: '2008-04-08' (type: string)
-                   outputColumnNames: ds
+                 Group By Operator
+                   keys: '2008-04-08' (type: string)
+                   mode: hash
+                   outputColumnNames: _col0
                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                   Group By Operator
-                     keys: ds (type: string)
-                     mode: hash
-                     outputColumnNames: _col0
+                   Reduce Output Operator
+                     key expressions: _col0 (type: string)
+                     sort order: +
+                     Map-reduce partition columns: _col0 (type: string)
                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                     Reduce Output Operator
-                       key expressions: _col0 (type: string)
-                       sort order: +
-                       Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
        Reducer 2
            Reduce Operator Tree:
              Merge Join Operator
                condition map:
                     Inner Join 0 to 1
                keys:
-                 0
-                 1
-               Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                 0 _col0 (type: string)
+                 1 '2008-04-08' (type: string)
+               Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count()
                  mode: hash
@@ -1994,8 +1993,25 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  sort order:
+                  key expressions: '2008-04-08' (type: string)
+                  sort order: +
+                  Map-reduce partition columns: '2008-04-08' (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: '2008-04-08' (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Dynamic Partitioning Event Operator
+                    Target Input: srcpart
+                    Partition key expr: ds
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Target column: ds
+                    Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -2003,18 +2019,21 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -4393,7 +4412,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4416,33 +4434,33 @@ STAGE PLANS:
         Map Operator Tree:
             TableScan
               alias: srcpart
-              filterExpr: (ds = '2008-04-08') (type: boolean)
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              filterExpr: ds is not null (type: boolean)
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                expressions: ds (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  sort order:
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 2
            Map Operator Tree:
                TableScan
                  alias: srcpart
                  filterExpr: (ds = '2008-04-08') (type: boolean)
                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                 Select Operator
-                   expressions: '2008-04-08' (type: string)
-                   outputColumnNames: ds
+                 Group By Operator
+                   keys: '2008-04-08' (type: string)
+                   mode: hash
+                   outputColumnNames: _col0
                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                   Group By Operator
-                     keys: ds (type: string)
-                     mode: hash
-                     outputColumnNames: _col0
+                   Reduce Output Operator
+                     key expressions: _col0 (type: string)
+                     sort order: +
+                     Map-reduce partition columns: _col0 (type: string)
                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                     Reduce Output Operator
-                       key expressions: _col0 (type: string)
-                       sort order: +
-                       Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
        Reducer 3
            Execution mode: vectorized
            Reduce Operator Tree:
@@ -4457,11 +4475,12 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0
-                  1
+                  0 _col0 (type: string)
+                  1 '2008-04-08' (type: string)
                 input vertices:
                   0 Map 1
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+                HybridGraceHashJoin: true
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -4493,18 +4512,21 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
diff --git a/ql/src/test/results/clientpositive/udf1.q.out b/ql/src/test/results/clientpositive/udf1.q.out
index dffbccf..b3b694b 100644
--- a/ql/src/test/results/clientpositive/udf1.q.out
+++ b/ql/src/test/results/clientpositive/udf1.q.out
@@ -137,26 +137,26 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c10 EXPRESSION []
-POSTHOOK: Lineage: dest1.c11 EXPRESSION []
-POSTHOOK: Lineage: dest1.c12 EXPRESSION []
-POSTHOOK: Lineage: dest1.c13 EXPRESSION []
-POSTHOOK: Lineage: dest1.c14 EXPRESSION []
-POSTHOOK: Lineage: dest1.c15 EXPRESSION []
-POSTHOOK: Lineage: dest1.c16 EXPRESSION []
-POSTHOOK: Lineage: dest1.c17 EXPRESSION []
-POSTHOOK: Lineage: dest1.c18 EXPRESSION []
-POSTHOOK: Lineage: dest1.c19 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION []
-POSTHOOK: Lineage: dest1.c20 EXPRESSION []
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 EXPRESSION []
-POSTHOOK: Lineage: dest1.c5 EXPRESSION []
-POSTHOOK: Lineage: dest1.c6 EXPRESSION []
-POSTHOOK: Lineage: dest1.c7 EXPRESSION []
-POSTHOOK: Lineage: dest1.c8 EXPRESSION []
-POSTHOOK: Lineage: dest1.c9 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Lineage: dest1.c10 SIMPLE []
+POSTHOOK: Lineage: dest1.c11 SIMPLE []
+POSTHOOK: Lineage: dest1.c12 SIMPLE []
+POSTHOOK: Lineage: dest1.c13 SIMPLE []
+POSTHOOK: Lineage: dest1.c14 SIMPLE []
+POSTHOOK: Lineage: dest1.c15 SIMPLE []
+POSTHOOK: Lineage: dest1.c16 SIMPLE []
+POSTHOOK: Lineage: dest1.c17 SIMPLE []
+POSTHOOK: Lineage: dest1.c18 SIMPLE []
+POSTHOOK: Lineage: dest1.c19 SIMPLE []
+POSTHOOK: Lineage: dest1.c2 SIMPLE []
+POSTHOOK: Lineage: dest1.c20 SIMPLE []
+POSTHOOK: Lineage: dest1.c3 SIMPLE []
+POSTHOOK: Lineage: dest1.c4 SIMPLE []
+POSTHOOK: Lineage: dest1.c5 SIMPLE []
+POSTHOOK: Lineage: dest1.c6 SIMPLE []
+POSTHOOK: Lineage: dest1.c7 SIMPLE []
+POSTHOOK: Lineage: dest1.c8 SIMPLE []
+POSTHOOK: Lineage: dest1.c9 SIMPLE []
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
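The udf1.q.out hunk above flips twenty lineage entries from EXPRESSION to SIMPLE. A plausible reading: once 'a' LIKE '%a%' and the other constant expressions are folded to literals before lineage is computed, the target columns no longer carry an expression to track, so the dependency type downgrades to the simple variant with an empty base-column list. A toy classification; the enum below mimics, but is not, Hive's LineageInfo.DependencyType:

    public class LineageTypeSketch {
      // Named after Hive's LineageInfo.DependencyType, but a local stand-in (assumption).
      enum DependencyType { SIMPLE, EXPRESSION }

      /** Once an output column folds to a literal, there is no expression left
       *  to track, so the dependency can be reported as SIMPLE. */
      static DependencyType classify(boolean foldsToLiteral) {
        return foldsToLiteral ? DependencyType.SIMPLE : DependencyType.EXPRESSION;
      }

      public static void main(String[] args) {
        System.out.println(classify(true)); // SIMPLE, matching the + lines above
      }
    }

Note that an empty base-column list alone does not force SIMPLE; the union_date_trim.q.out hunk below keeps EXPRESSION with an empty list.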
diff --git a/ql/src/test/results/clientpositive/udf_10_trims.q.out b/ql/src/test/results/clientpositive/udf_10_trims.q.out
index 2f79723..3a5303a 100644
--- a/ql/src/test/results/clientpositive/udf_10_trims.q.out
+++ b/ql/src/test/results/clientpositive/udf_10_trims.q.out
@@ -117,4 +117,4 @@ WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
diff --git a/ql/src/test/results/clientpositive/udf_concat_insert2.q.out b/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
index f1b70fe..d68bd76 100644
--- a/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
+++ b/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
@@ -16,7 +16,7 @@ INSERT OVERWRITE TABLE dest1 SELECT concat('1234', 'abc', 'extra argument'), src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION []
+POSTHOOK: Lineage: dest1.key SIMPLE []
 POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/union_date_trim.q.out b/ql/src/test/results/clientpositive/union_date_trim.q.out
index e2f5269..324e8b7 100644
--- a/ql/src/test/results/clientpositive/union_date_trim.q.out
+++ b/ql/src/test/results/clientpositive/union_date_trim.q.out
@@ -51,4 +51,4 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testdate
 POSTHOOK: Output: default@testdate
 POSTHOOK: Lineage: testdate.dt EXPRESSION [(testdate)testdate.FieldSchema(name:dt, type:date, comment:null), ]
-POSTHOOK: Lineage: testdate.id EXPRESSION [(testdate)testdate.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: testdate.id EXPRESSION []
diff --git a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
index 952eef9..b075bf6 100644
--- a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
+++ b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
@@ -94,13 +94,17 @@ STAGE PLANS:
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Union
             Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
+            Select Operator
+              expressions: _col0 (type: int)
+              outputColumnNames: _col0
               Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           TableScan
             alias: union_all_bug_test_2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -113,13 +117,17 @@ STAGE PLANS:
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Union
             Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
+            Select Operator
+              expressions: _col0 (type: int)
+              outputColumnNames: _col0
               Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
index 25e5cfa..ec6226e 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
@@ -106,7 +106,7 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                  key expressions: round(_col0, -1) (type: decimal(11,0))
                   sort order: +
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: decimal(10,0))
@@ -242,7 +242,7 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                  key expressions: round(_col0, -1) (type: decimal(11,0))
                   sort order: +
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: decimal(10,0))
@@ -379,7 +379,7 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: round(_col0, (- 1)) (type: decimal(11,0))
+                  key expressions: round(_col0, -1) (type: decimal(11,0))
                   sort order: +
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: decimal(10,0))
diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
index 2973008..2306ddb 100644
--- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
@@ -499,8 +499,8 @@ STAGE PLANS:
             condition map:
                  Left Semi Join 0 to 1
             keys:
-              0 _col0 (type: int)
-              1 _col0 (type: int)
+              0 _col0 (type: int), 1 (type: int)
+              1 _col0 (type: int), _col1 (type: int)
             outputColumnNames: _col1, _col2
             Statistics: Num rows: 14 Data size: 1714 Basic stats: COMPLETE Column stats: NONE
             File Output Operator