diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java index 336745b..0704679 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSemanticException.java @@ -31,7 +31,7 @@ public enum UnsupportedFeature { Distinct_without_an_aggreggation, Duplicates_in_RR, Filter_expression_with_non_boolean_return_type, - Having_clause_without_any_groupby, Hint, Invalid_column_reference, Invalid_decimal, + Having_clause_without_any_groupby, Hint, Invalid_column_reference, Invalid_decimal, Invalid_interval, Less_than_equal_greater_than, Multi_insert, Others, Same_name_in_multiple_expressions, Schema_less_table, Select_alias_in_having_clause, Select_transform, Subquery, Table_sample_clauses, UDTF, Union_type, Unique_join diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelFactories.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelFactories.java new file mode 100644 index 0000000..6de7948 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelFactories.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.optimizer.calcite; + +import org.apache.calcite.plan.Contexts; +import org.apache.calcite.rel.core.RelFactories.FilterFactory; +import org.apache.calcite.rel.core.RelFactories.JoinFactory; +import org.apache.calcite.rel.core.RelFactories.ProjectFactory; +import org.apache.calcite.tools.RelBuilder; +import org.apache.calcite.tools.RelBuilderFactory; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; + +/** + * Contains factory interface and default implementation for creating various + * rel nodes. + */ +public class HiveRelFactories { + public static final ProjectFactory DEFAULT_PROJECT_FACTORY = + HiveProject.DEFAULT_PROJECT_FACTORY; + + public static final FilterFactory DEFAULT_FILTER_FACTORY = + HiveFilter.DEFAULT_FILTER_FACTORY; + + public static final JoinFactory DEFAULT_JOIN_FACTORY = + HiveJoin.HIVE_JOIN_FACTORY; + + /** A {@link RelBuilderFactory} that creates a {@link RelBuilder} that will + * create logical relational expressions for everything. 
*/
+  public static final RelBuilderFactory LOGICAL_BUILDER =
+      RelBuilder.proto(
+          Contexts.of(DEFAULT_PROJECT_FACTORY,
+              DEFAULT_FILTER_FACTORY,
+              DEFAULT_JOIN_FACTORY));
+
+  private HiveRelFactories() {
+  }
+
+}
+
+// End HiveRelFactories.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java
new file mode 100644
index 0000000..bbf518d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagate;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.RexNodeConverter;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+
+public class HiveRexExecutorImpl implements RelOptPlanner.Executor {
+
+  private final RelOptCluster cluster;
+
+  public HiveRexExecutorImpl(RelOptCluster cluster) {
+    this.cluster = cluster;
+  }
+
+  @Override
+  public void reduce(RexBuilder rexBuilder, List<RexNode> constExps, List<RexNode> reducedValues) {
+    RexNodeConverter rexNodeConverter = new RexNodeConverter(cluster);
+    for (RexNode rexNode : constExps) {
+      // initialize the converter
+      ExprNodeConverter converter = new ExprNodeConverter("", null, null, null,
+          new HashSet<Integer>(), cluster.getTypeFactory());
+      // convert RexNode to ExprNodeGenericFuncDesc
+      ExprNodeDesc expr = rexNode.accept(converter);
+      if (expr instanceof ExprNodeGenericFuncDesc) {
+        // folding the constant
+        ExprNodeDesc constant = ConstantPropagateProcFactory
+            .foldExpr((ExprNodeGenericFuncDesc) expr);
+        if (constant != null) {
+          try {
+            // convert constant back to RexNode
+            reducedValues.add(rexNodeConverter.convert((ExprNodeConstantDesc) constant));
+          } catch (Exception e) {
+            e.printStackTrace();
+            reducedValues.add(rexNode);
+          }
+        } else {
+          reducedValues.add(rexNode);
+        }
+      } else {
+        reducedValues.add(rexNode);
+      }
+    }
+  }
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java new file mode 100644 index 0000000..1e70a99 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java @@ -0,0 +1,986 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules; + +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelOptPredicateList; +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Calc; +import org.apache.calcite.rel.core.Filter; +import org.apache.calcite.rel.core.JoinInfo; +import org.apache.calcite.rel.core.Project; +import org.apache.calcite.rel.logical.LogicalCalc; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; +import org.apache.calcite.rex.RexCorrelVariable; +import org.apache.calcite.rex.RexDynamicParam; +import org.apache.calcite.rex.RexFieldAccess; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexLocalRef; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexOver; +import org.apache.calcite.rex.RexProgram; +import org.apache.calcite.rex.RexProgramBuilder; +import org.apache.calcite.rex.RexRangeRef; +import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.rex.RexUtil; +import org.apache.calcite.rex.RexVisitorImpl; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.fun.SqlRowOperator; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.tools.RelBuilderFactory; +import org.apache.calcite.util.Pair; +import org.apache.calcite.util.Stacks; +import org.apache.calcite.util.Util; +import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +/** + * Collection of planner rules that apply various 
simplifying transformations on + * RexNode trees. Currently, there are two transformations: + *
 + * <ul>
 + * <li>Constant reduction, which evaluates constant subtrees, replacing them
 + * with a corresponding RexLiteral
 + * <li>Removal of redundant casts, which occurs when the argument into the cast
 + * is the same as the type of the resulting cast expression
 + * </ul>
+ */ +public abstract class HiveReduceExpressionsRule extends RelOptRule { + //~ Static fields/initializers --------------------------------------------- + + /** + * Regular expression that matches the description of all instances of this + * rule and {@link ValuesReduceRule} also. Use + * it to prevent the planner from invoking these rules. + */ + public static final Pattern EXCLUSION_PATTERN = + Pattern.compile("Reduce(Expressions|Values)Rule.*"); + + /** + * Singleton rule that reduces constants inside a + * {@link org.apache.calcite.rel.logical.HiveFilter}. + */ + public static final HiveReduceExpressionsRule FILTER_INSTANCE = + new FilterReduceExpressionsRule(HiveFilter.class, HiveRelFactories.LOGICAL_BUILDER); + + /** + * Singleton rule that reduces constants inside a + * {@link org.apache.calcite.rel.logical.HiveProject}. + */ + public static final HiveReduceExpressionsRule PROJECT_INSTANCE = + new ProjectReduceExpressionsRule(HiveProject.class, HiveRelFactories.LOGICAL_BUILDER); + + /** + * Singleton rule that reduces constants inside a + * {@link org.apache.calcite.rel.core.HiveJoin}. + */ + public static final HiveReduceExpressionsRule JOIN_INSTANCE = + new JoinReduceExpressionsRule(HiveJoin.class, HiveRelFactories.LOGICAL_BUILDER); + + /** + * Singleton rule that reduces constants inside a + * {@link org.apache.calcite.rel.logical.LogicalCalc}. + */ + public static final HiveReduceExpressionsRule CALC_INSTANCE = + new CalcReduceExpressionsRule(LogicalCalc.class, HiveRelFactories.LOGICAL_BUILDER); + + /** + * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Filter}. + * If the condition is a constant, the filter is removed (if TRUE) or replaced with + * an empty {@link org.apache.calcite.rel.core.Values} (if FALSE or NULL). + */ + public static class FilterReduceExpressionsRule extends HiveReduceExpressionsRule { + + public FilterReduceExpressionsRule(Class filterClass, + RelBuilderFactory relBuilderFactory) { + super(filterClass, relBuilderFactory, "HiveReduceExpressionsRule(Filter)"); + } + + @Override public void onMatch(RelOptRuleCall call) { + final Filter filter = call.rel(0); + final List expList = + Lists.newArrayList(filter.getCondition()); + RexNode newConditionExp; + boolean reduced; + final RelOptPredicateList predicates = + RelMetadataQuery.getPulledUpPredicates(filter.getInput()); + if (reduceExpressions(filter, expList, predicates)) { + assert expList.size() == 1; + newConditionExp = expList.get(0); + reduced = true; + } else { + // No reduction, but let's still test the original + // predicate to see if it was already a constant, + // in which case we don't need any runtime decision + // about filtering. + newConditionExp = filter.getCondition(); + reduced = false; + } + if (newConditionExp.isAlwaysTrue()) { + call.transformTo( + filter.getInput()); + } + // TODO: support LogicalValues + else if (newConditionExp instanceof RexLiteral + || RexUtil.isNullLiteral(newConditionExp, true)) { + // call.transformTo(call.builder().values(filter.getRowType()).build()); + return; + } + else if (reduced) { + call.transformTo(call.builder(). 
+ push(filter.getInput()).filter(expList.get(0)).build()); + } else { + if (newConditionExp instanceof RexCall) { + RexCall rexCall = (RexCall) newConditionExp; + boolean reverse = + rexCall.getOperator() + == SqlStdOperatorTable.NOT; + if (reverse) { + rexCall = (RexCall) rexCall.getOperands().get(0); + } + reduceNotNullableFilter(call, filter, rexCall, reverse); + } + return; + } + + // New plan is absolutely better than old plan. + call.getPlanner().setImportance(filter, 0.0); + } + + private void reduceNotNullableFilter( + RelOptRuleCall call, + Filter filter, + RexCall rexCall, + boolean reverse) { + // If the expression is a IS [NOT] NULL on a non-nullable + // column, then we can either remove the filter or replace + // it with an Empty. + boolean alwaysTrue; + switch (rexCall.getKind()) { + case IS_NULL: + case IS_UNKNOWN: + alwaysTrue = false; + break; + case IS_NOT_NULL: + alwaysTrue = true; + break; + default: + return; + } + if (reverse) { + alwaysTrue = !alwaysTrue; + } + RexNode operand = rexCall.getOperands().get(0); + if (operand instanceof RexInputRef) { + RexInputRef inputRef = (RexInputRef) operand; + if (!inputRef.getType().isNullable()) { + if (alwaysTrue) { + call.transformTo(filter.getInput()); + } else { + // TODO: support LogicalValues + // call.transformTo(call.builder().values(filter.getRowType()).build()); + return; + } + } + } + } + } + + /** + * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Project}. + */ + public static class ProjectReduceExpressionsRule extends HiveReduceExpressionsRule { + + public ProjectReduceExpressionsRule(Class projectClass, + RelBuilderFactory relBuilderFactory) { + super(projectClass, relBuilderFactory, "HiveReduceExpressionsRule(Project)"); + } + + public boolean matches(RelOptRuleCall call) { + Project project = call.rel(0); + HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class); + + // If this operator has been visited already by the rule, + // we do not need to apply the optimization + if (registry != null && registry.getVisited(this).contains(project)) { + return false; + } + + return true; + } + + @Override public void onMatch(RelOptRuleCall call) { + Project project = call.rel(0); + // Register that we have visited this operator in this rule + HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class); + if (registry != null) { + registry.registerVisited(this, project); + } + final RelOptPredicateList predicates = + RelMetadataQuery.getPulledUpPredicates(project.getInput()); + final List expList = + Lists.newArrayList(project.getProjects()); + if (reduceExpressions(project, expList, predicates)) { + RelNode newProject = call.builder().push(project.getInput()) + .project(expList, project.getRowType().getFieldNames()).build(); + if (registry != null) { + registry.registerVisited(this, newProject); + } + call.transformTo(newProject); + + // New plan is absolutely better than old plan. + call.getPlanner().setImportance(project, 0.0); + } + } + } + + /** + * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.HiveJoin}. 
+ */ + public static class JoinReduceExpressionsRule extends HiveReduceExpressionsRule { + + public JoinReduceExpressionsRule(Class joinClass, + RelBuilderFactory relBuilderFactory) { + super(joinClass, relBuilderFactory, "HiveReduceExpressionsRule(HiveJoin)"); + } + + @Override public void onMatch(RelOptRuleCall call) { + final HiveJoin join = call.rel(0); + final List expList = Lists.newArrayList(join.getCondition()); + final int fieldCount = join.getLeft().getRowType().getFieldCount(); + final RelOptPredicateList leftPredicates = + RelMetadataQuery.getPulledUpPredicates(join.getLeft()); + final RelOptPredicateList rightPredicates = + RelMetadataQuery.getPulledUpPredicates(join.getRight()); + final RelOptPredicateList predicates = + leftPredicates.union(rightPredicates.shift(fieldCount)); + if (!reduceExpressions(join, expList, predicates)) { + return; + } + final JoinInfo joinInfo = JoinInfo.of(join.getLeft(), join.getRight(), expList.get(0)); + if (!joinInfo.isEqui()) { + // This kind of join must be an equi-join, and the condition is + // no longer an equi-join. SemiJoin is an example of this. + return; + } + call.transformTo( + join.copy( + join.getTraitSet(), + expList.get(0), + join.getLeft(), + join.getRight(), + join.getJoinType(), + join.isSemiJoinDone())); + + // New plan is absolutely better than old plan. + call.getPlanner().setImportance(join, 0.0); + } + } + + /** + * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Calc}. + */ + public static class CalcReduceExpressionsRule extends HiveReduceExpressionsRule { + + public CalcReduceExpressionsRule(Class calcClass, + RelBuilderFactory relBuilderFactory) { + super(calcClass, relBuilderFactory, "HiveReduceExpressionsRule(Calc)"); + } + + @Override public void onMatch(RelOptRuleCall call) { + Calc calc = call.rel(0); + RexProgram program = calc.getProgram(); + final List exprList = program.getExprList(); + + // Form a list of expressions with sub-expressions fully expanded. 
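For readers new to RexProgram: local refs ($tN) are indexes into the program's expression list, so "fully expanded" means inlining those indexes bottom-up until every entry is a self-contained tree, which is what the shuttle just below does. A toy illustration in plain Java (the string-based expression encoding and the expand helper are ours, not Hive or Calcite code):

```java
import java.util.ArrayList;
import java.util.List;

class ExpandLocalRefs {
  // Inline $tN references bottom-up, mirroring the RexShuttle that resolves
  // each RexLocalRef against the already-expanded prefix of the list.
  static List<String> expand(List<String> exprs) {
    List<String> out = new ArrayList<>();
    for (String e : exprs) {
      for (int i = out.size() - 1; i >= 0; i--) { // descending so $t10 is replaced before $t1
        e = e.replace("$t" + i, "(" + out.get(i) + ")");
      }
      out.add(e);
    }
    return out;
  }

  public static void main(String[] args) {
    // program exprs: inputs $0 and $1, then $t2 = $t0 + $t1, then $t3 = $t2 * $t0
    System.out.println(expand(List.of("$0", "$1", "+($t0,$t1)", "*($t2,$t0)")));
    // -> [$0, $1, +(($0),($1)), *((+(($0),($1))),($0))]
  }
}
```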
+ final List expandedExprList = Lists.newArrayList(); + final RexShuttle shuttle = + new RexShuttle() { + public RexNode visitLocalRef(RexLocalRef localRef) { + return expandedExprList.get(localRef.getIndex()); + } + }; + for (RexNode expr : exprList) { + expandedExprList.add(expr.accept(shuttle)); + } + final RelOptPredicateList predicates = RelOptPredicateList.EMPTY; + if (reduceExpressions(calc, expandedExprList, predicates)) { + final RexProgramBuilder builder = + new RexProgramBuilder( + calc.getInput().getRowType(), + calc.getCluster().getRexBuilder()); + final List list = Lists.newArrayList(); + for (RexNode expr : expandedExprList) { + list.add(builder.registerInput(expr)); + } + if (program.getCondition() != null) { + final int conditionIndex = + program.getCondition().getIndex(); + final RexNode newConditionExp = + expandedExprList.get(conditionIndex); + if (newConditionExp.isAlwaysTrue()) { + // condition is always TRUE - drop it + } else if (newConditionExp instanceof RexLiteral + || RexUtil.isNullLiteral(newConditionExp, true)) { + // condition is always NULL or FALSE - replace calc + // with empty + call.transformTo(call.builder().values(calc.getRowType()).build()); + return; + } else { + builder.addCondition(list.get(conditionIndex)); + } + } + int k = 0; + for (RexLocalRef projectExpr : program.getProjectList()) { + final int index = projectExpr.getIndex(); + builder.addProject( + list.get(index).getIndex(), + program.getOutputRowType().getFieldNames().get(k++)); + } + call.transformTo( + calc.copy(calc.getTraitSet(), calc.getInput(), builder.getProgram())); + + // New plan is absolutely better than old plan. + call.getPlanner().setImportance(calc, 0.0); + } + } + } + + //~ Constructors ----------------------------------------------------------- + + /** + * Creates a HiveReduceExpressionsRule. + * + * @param clazz class of rels to which this rule should apply + */ + protected HiveReduceExpressionsRule(Class clazz, + RelBuilderFactory relBuilderFactory, String desc) { + super(operand(clazz, any()), relBuilderFactory, desc); + } + + //~ Methods ---------------------------------------------------------------- + + /** + * Reduces a list of expressions. + * + * @param rel Relational expression + * @param expList List of expressions, modified in place + * @param predicates Constraints known to hold on input expressions + * @return whether reduction found something to change, and succeeded + */ + protected static boolean reduceExpressions(RelNode rel, List expList, + RelOptPredicateList predicates) { + RexBuilder rexBuilder = rel.getCluster().getRexBuilder(); + + // Replace predicates on CASE to CASE on predicates. + new CaseShuttle().mutate(expList); + + // Find reducible expressions. + final List constExps = Lists.newArrayList(); + List addCasts = Lists.newArrayList(); + final List removableCasts = Lists.newArrayList(); + final ImmutableMap constants = + predicateConstants(predicates); + findReducibleExps(rel.getCluster().getTypeFactory(), expList, constants, + constExps, addCasts, removableCasts); + if (constExps.isEmpty() && removableCasts.isEmpty()) { + return false; + } + + // Remove redundant casts before reducing constant expressions. + // If the argument to the redundant cast is a reducible constant, + // reducing that argument to a constant first will result in not being + // able to locate the original cast expression. 
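The ordering argument in the comment above can be made concrete. RexReplacer (defined later in this file) locates its targets by structural equality (indexOf), so folding a removable cast's operand to a constant first would leave the remembered cast node matching nothing. A self-contained sketch with a toy Expr record, not Hive's classes:

```java
import java.util.List;

class OrderMatters {
  record Expr(String op, List<Expr> args) {}

  public static void main(String[] args) {
    Expr onePlusTwo = new Expr("+", List.of(new Expr("1", List.of()), new Expr("2", List.of())));
    Expr cast = new Expr("CAST:INT", List.of(onePlusTwo));   // the remembered removable cast
    Expr afterFold = new Expr("CAST:INT", List.of(new Expr("3", List.of())));

    // before folding: the remembered cast is still found by equality
    System.out.println(List.of(cast).indexOf(cast));      // 0
    // after folding the operand first: structural equality no longer holds
    System.out.println(List.of(afterFold).indexOf(cast)); // -1
  }
}
```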
+ if (!removableCasts.isEmpty()) { + final List reducedExprs = Lists.newArrayList(); + for (RexNode exp : removableCasts) { + RexCall call = (RexCall) exp; + reducedExprs.add(call.getOperands().get(0)); + } + RexReplacer replacer = + new RexReplacer( + rexBuilder, + removableCasts, + reducedExprs, + Collections.nCopies(removableCasts.size(), false)); + replacer.mutate(expList); + } + + if (constExps.isEmpty()) { + return true; + } + + final List constExps2 = Lists.newArrayList(constExps); + if (!constants.isEmpty()) { + //noinspection unchecked + final List> pairs = + (List>) (List) + Lists.newArrayList(constants.entrySet()); + RexReplacer replacer = + new RexReplacer( + rexBuilder, + Pair.left(pairs), + Pair.right(pairs), + Collections.nCopies(pairs.size(), false)); + replacer.mutate(constExps2); + } + + // Compute the values they reduce to. + RelOptPlanner.Executor executor = + rel.getCluster().getPlanner().getExecutor(); + if (executor == null) { + // Cannot reduce expressions: caller has not set an executor in their + // environment. Caller should execute something like the following before + // invoking the planner: + // + // final RexExecutorImpl executor = + // new RexExecutorImpl(Schemas.createDataContext(null)); + // rootRel.getCluster().getPlanner().setExecutor(executor); + return false; + } + + final List reducedValues = Lists.newArrayList(); + executor.reduce(rexBuilder, constExps2, reducedValues); + + // For Project, we have to be sure to preserve the result + // types, so always cast regardless of the expression type. + // For other RelNodes like Filter, in general, this isn't necessary, + // and the presence of casts could hinder other rules such as sarg + // analysis, which require bare literals. But there are special cases, + // like when the expression is a UDR argument, that need to be + // handled as special cases. + if (rel instanceof Project) { + addCasts = Collections.nCopies(reducedValues.size(), true); + } + + RexReplacer replacer = + new RexReplacer( + rexBuilder, + constExps, + reducedValues, + addCasts); + replacer.mutate(expList); + return true; + } + + /** + * Locates expressions that can be reduced to literals or converted to + * expressions with redundant casts removed. + * + * @param typeFactory Type factory + * @param exps list of candidate expressions to be examined for + * reduction + * @param constants List of expressions known to be constant + * @param constExps returns the list of expressions that can be constant + * reduced + * @param addCasts indicator for each expression that can be constant + * reduced, whether a cast of the resulting reduced + * expression is potentially necessary + * @param removableCasts returns the list of cast expressions where the cast + */ + protected static void findReducibleExps(RelDataTypeFactory typeFactory, + List exps, ImmutableMap constants, + List constExps, List addCasts, + List removableCasts) { + ReducibleExprLocator gardener = + new ReducibleExprLocator(typeFactory, constants, constExps, + addCasts, removableCasts); + for (RexNode exp : exps) { + gardener.analyze(exp); + } + assert constExps.size() == addCasts.size(); + } + + protected static ImmutableMap predicateConstants( + RelOptPredicateList predicates) { + // We cannot use an ImmutableMap.Builder here. If there are multiple entries + // with the same key (e.g. "WHERE deptno = 1 AND deptno = 2"), it doesn't + // matter which we take, so the latter will replace the former. 
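The numbered notes that follow spell out the exact contract; as a runnable plain-Java miniature of the same map/exclude-set bookkeeping (gatherConstraints below implements the real thing over RexNodes and RexLiterals):

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

class PredicateConstants {
  // Collect column = literal facts; conflicting facts (x = 1 AND x = 2)
  // evict the key and blacklist it so it is never re-added.
  static Map<String, Integer> constants(List<String[]> equalities) {
    Map<String, Integer> map = new HashMap<>();
    Set<String> exclude = new HashSet<>();
    for (String[] eq : equalities) {          // eq = {column, literal}
      String col = eq[0];
      int lit = Integer.parseInt(eq[1]);
      if (exclude.contains(col)) {
        continue;
      }
      Integer prev = map.putIfAbsent(col, lit);
      if (prev != null && prev != lit) {      // inconsistent constraint
        map.remove(col);
        exclude.add(col);
      }
    }
    return map;
  }

  public static void main(String[] args) {
    System.out.println(constants(List.of(
        new String[]{"deptno", "1"}, new String[]{"deptno", "1"})));  // {deptno=1}
    System.out.println(constants(List.of(
        new String[]{"deptno", "1"}, new String[]{"deptno", "2"})));  // {}
  }
}
```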
+ // The basic idea is to find all the pairs of RexNode = RexLiteral + // (1) If 'predicates' contain a non-EQUALS, we bail out. + // (2) It is OK if a RexNode is equal to the same RexLiteral several times, + // (e.g. "WHERE deptno = 1 AND deptno = 1") + // (3) It will return false if there are inconsistent constraints (e.g. + // "WHERE deptno = 1 AND deptno = 2") + final Map map = new HashMap<>(); + final Set excludeSet = new HashSet<>(); + for (RexNode predicate : predicates.pulledUpPredicates) { + gatherConstraints(map, predicate, excludeSet); + } + final ImmutableMap.Builder builder = + ImmutableMap.builder(); + for (Map.Entry entry : map.entrySet()) { + RexNode rexNode = entry.getKey(); + if (!overlap(rexNode, excludeSet)) { + builder.put(rexNode, entry.getValue()); + } + } + return builder.build(); + } + + private static boolean overlap(RexNode rexNode, Set set) { + if (rexNode instanceof RexCall) { + for (RexNode r : ((RexCall) rexNode).getOperands()) { + if (overlap(r, set)) { + return true; + } + } + return false; + } else { + return set.contains(rexNode); + } + } + + /** Tries to decompose the RexNode which is a RexCall into non-literal + * RexNodes. */ + private static void decompose(Set set, RexNode rexNode) { + if (rexNode instanceof RexCall) { + for (RexNode r : ((RexCall) rexNode).getOperands()) { + decompose(set, r); + } + } else if (!(rexNode instanceof RexLiteral)) { + set.add(rexNode); + } + } + + private static void gatherConstraints(Map map, + RexNode predicate, Set excludeSet) { + if (predicate.getKind() != SqlKind.EQUALS) { + decompose(excludeSet, predicate); + return; + } + final List operands = ((RexCall) predicate).getOperands(); + if (operands.size() != 2) { + decompose(excludeSet, predicate); + return; + } + // if it reaches here, we have rexNode equals rexNode + final RexNode left = operands.get(0); + final RexNode right = operands.get(1); + // note that literals are immutable too and they can only be compared through + // values. + if (right instanceof RexLiteral && !excludeSet.contains(left)) { + RexLiteral existedValue = map.get(left); + if (existedValue == null) { + map.put(left, (RexLiteral) right); + } else { + if (!existedValue.getValue().equals(((RexLiteral) right).getValue())) { + // we found conflict values. + map.remove(left); + excludeSet.add(left); + } + } + } else if (left instanceof RexLiteral && !excludeSet.contains(right)) { + RexLiteral existedValue = map.get(right); + if (existedValue == null) { + map.put(right, (RexLiteral) left); + } else { + if (!existedValue.getValue().equals(((RexLiteral) left).getValue())) { + map.remove(right); + excludeSet.add(right); + } + } + } + } + + /** Pushes predicates into a CASE. + * + *
<p>
We have a loose definition of 'predicate': any boolean expression will + * do, except CASE. For example '(CASE ...) = 5' or '(CASE ...) IS NULL'. + */ + protected static RexCall pushPredicateIntoCase(RexCall call) { + if (call.getType().getSqlTypeName() != SqlTypeName.BOOLEAN) { + return call; + } + switch (call.getKind()) { + case CASE: + case AND: + case OR: + return call; // don't push CASE into CASE! + } + int caseOrdinal = -1; + final List operands = call.getOperands(); + for (int i = 0; i < operands.size(); i++) { + RexNode operand = operands.get(i); + switch (operand.getKind()) { + case CASE: + caseOrdinal = i; + } + } + if (caseOrdinal < 0) { + return call; + } + // Convert + // f(CASE WHEN p1 THEN v1 ... END, arg) + // to + // CASE WHEN p1 THEN f(v1, arg) ... END + final RexCall case_ = (RexCall) operands.get(caseOrdinal); + final List nodes = new ArrayList<>(); + for (int i = 0; i < case_.getOperands().size(); i++) { + RexNode node = case_.getOperands().get(i); + if (!RexUtil.isCasePredicate(case_, i)) { + node = substitute(call, caseOrdinal, node); + } + nodes.add(node); + } + return case_.clone(call.getType(), nodes); + } + + /** Converts op(arg0, ..., argOrdinal, ..., argN) to op(arg0,..., node, ..., argN). */ + protected static RexNode substitute(RexCall call, int ordinal, RexNode node) { + final List newOperands = Lists.newArrayList(call.getOperands()); + newOperands.set(ordinal, node); + return call.clone(call.getType(), newOperands); + } + + //~ Inner Classes ---------------------------------------------------------- + + /** + * Replaces expressions with their reductions. Note that we only have to + * look for RexCall, since nothing else is reducible in the first place. + */ + protected static class RexReplacer extends RexShuttle { + private final RexBuilder rexBuilder; + private final List reducibleExps; + private final List reducedValues; + private final List addCasts; + + RexReplacer( + RexBuilder rexBuilder, + List reducibleExps, + List reducedValues, + List addCasts) { + this.rexBuilder = rexBuilder; + this.reducibleExps = reducibleExps; + this.reducedValues = reducedValues; + this.addCasts = addCasts; + } + + @Override public RexNode visitInputRef(RexInputRef inputRef) { + RexNode node = visit(inputRef); + if (node == null) { + return super.visitInputRef(inputRef); + } + return node; + } + + @Override public RexNode visitCall(RexCall call) { + RexNode node = visit(call); + if (node != null) { + return node; + } + node = super.visitCall(call); + if (node != call) { + node = RexUtil.simplify(rexBuilder, node); + } + return node; + } + + private RexNode visit(final RexNode call) { + int i = reducibleExps.indexOf(call); + if (i == -1) { + return null; + } + RexNode replacement = reducedValues.get(i); + if (addCasts.get(i) + && (replacement.getType() != call.getType())) { + // Handle change from nullable to NOT NULL by claiming + // that the result is still nullable, even though + // we know it isn't. + // + // Also, we cannot reduce CAST('abc' AS VARCHAR(4)) to 'abc'. + // If we make 'abc' of type VARCHAR(4), we may later encounter + // the same expression in a Project's digest where it has + // type VARCHAR(3), and that's wrong. + replacement = rexBuilder.makeCast(call.getType(), replacement, true); + } + return replacement; + } + } + + /** + * Helper class used to locate expressions that either can be reduced to + * literals or contain redundant casts. 
+ */ + protected static class ReducibleExprLocator extends RexVisitorImpl { + /** Whether an expression is constant, and if so, whether it can be + * reduced to a simpler constant. */ + enum Constancy { + NON_CONSTANT, REDUCIBLE_CONSTANT, IRREDUCIBLE_CONSTANT + } + + private final RelDataTypeFactory typeFactory; + + private final List stack; + + private final ImmutableMap constants; + + private final List constExprs; + + private final List addCasts; + + private final List removableCasts; + + private final List parentCallTypeStack; + + ReducibleExprLocator(RelDataTypeFactory typeFactory, + ImmutableMap constants, List constExprs, + List addCasts, List removableCasts) { + // go deep + super(true); + this.typeFactory = typeFactory; + this.constants = constants; + this.constExprs = constExprs; + this.addCasts = addCasts; + this.removableCasts = removableCasts; + this.stack = Lists.newArrayList(); + this.parentCallTypeStack = Lists.newArrayList(); + } + + public void analyze(RexNode exp) { + assert stack.isEmpty(); + + exp.accept(this); + + // Deal with top of stack + assert stack.size() == 1; + assert parentCallTypeStack.isEmpty(); + Constancy rootConstancy = stack.get(0); + if (rootConstancy == Constancy.REDUCIBLE_CONSTANT) { + // The entire subtree was constant, so add it to the result. + addResult(exp); + } + stack.clear(); + } + + private Void pushVariable() { + stack.add(Constancy.NON_CONSTANT); + return null; + } + + private void addResult(RexNode exp) { + // Cast of literal can't be reduced, so skip those (otherwise we'd + // go into an infinite loop as we add them back). + if (exp.getKind() == SqlKind.CAST) { + RexCall cast = (RexCall) exp; + RexNode operand = cast.getOperands().get(0); + if (operand instanceof RexLiteral) { + return; + } + } + constExprs.add(exp); + + // In the case where the expression corresponds to a UDR argument, + // we need to preserve casts. Note that this only applies to + // the topmost argument, not expressions nested within the UDR + // call. + // + // REVIEW zfong 6/13/08 - Are there other expressions where we + // also need to preserve casts? 
+ if (parentCallTypeStack.isEmpty()) { + addCasts.add(false); + } else { + addCasts.add(isUdf(Stacks.peek(parentCallTypeStack))); + } + } + + private Boolean isUdf(SqlOperator operator) { + // return operator instanceof UserDefinedRoutine + return false; + } + + public Void visitInputRef(RexInputRef inputRef) { + if (constants.containsKey(inputRef)) { + stack.add(Constancy.REDUCIBLE_CONSTANT); + return null; + } + return pushVariable(); + } + + public Void visitLiteral(RexLiteral literal) { + stack.add(Constancy.IRREDUCIBLE_CONSTANT); + return null; + } + + public Void visitOver(RexOver over) { + // assume non-constant (running SUM(1) looks constant but isn't) + analyzeCall(over, Constancy.NON_CONSTANT); + return null; + } + + public Void visitCorrelVariable(RexCorrelVariable correlVariable) { + return pushVariable(); + } + + public Void visitCall(RexCall call) { + // assume REDUCIBLE_CONSTANT until proven otherwise + analyzeCall(call, Constancy.REDUCIBLE_CONSTANT); + return null; + } + + private void analyzeCall(RexCall call, Constancy callConstancy) { + Stacks.push(parentCallTypeStack, call.getOperator()); + + // visit operands, pushing their states onto stack + super.visitCall(call); + + // look for NON_CONSTANT operands + int operandCount = call.getOperands().size(); + List operandStack = Util.last(stack, operandCount); + for (Constancy operandConstancy : operandStack) { + if (operandConstancy == Constancy.NON_CONSTANT) { + callConstancy = Constancy.NON_CONSTANT; + } + } + + // Even if all operands are constant, the call itself may + // be non-deterministic. + if (!call.getOperator().isDeterministic()) { + callConstancy = Constancy.NON_CONSTANT; + } else if (call.getOperator().isDynamicFunction()) { + // We can reduce the call to a constant, but we can't + // cache the plan if the function is dynamic. + // For now, treat it same as non-deterministic. + callConstancy = Constancy.NON_CONSTANT; + } + + // Row operator itself can't be reduced to a literal, but if + // the operands are constants, we still want to reduce those + if ((callConstancy == Constancy.REDUCIBLE_CONSTANT) + && (call.getOperator() instanceof SqlRowOperator)) { + callConstancy = Constancy.NON_CONSTANT; + } + + if (callConstancy == Constancy.NON_CONSTANT) { + // any REDUCIBLE_CONSTANT children are now known to be maximal + // reducible subtrees, so they can be added to the result + // list + for (int iOperand = 0; iOperand < operandCount; ++iOperand) { + Constancy constancy = operandStack.get(iOperand); + if (constancy == Constancy.REDUCIBLE_CONSTANT) { + addResult(call.getOperands().get(iOperand)); + } + } + + // if this cast expression can't be reduced to a literal, + // then see if we can remove the cast + if (call.getOperator() == SqlStdOperatorTable.CAST) { + reduceCasts(call); + } + } + + // pop operands off of the stack + operandStack.clear(); + + // pop this parent call operator off the stack + Stacks.pop(parentCallTypeStack, call.getOperator()); + + // push constancy result for this call onto stack + stack.add(callConstancy); + } + + private void reduceCasts(RexCall outerCast) { + List operands = outerCast.getOperands(); + if (operands.size() != 1) { + return; + } + RelDataType outerCastType = outerCast.getType(); + RelDataType operandType = operands.get(0).getType(); + if (operandType.equals(outerCastType)) { + removableCasts.add(outerCast); + return; + } + + // See if the reduction + // CAST((CAST x AS type) AS type NOT NULL) + // -> CAST(x AS type NOT NULL) + // applies. 
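A sketch of the type test this comment describes, against Calcite's real RelDataTypeFactory API (the helper name is ours): the inner cast is droppable when inner and outer types agree once both are made nullable. The identity comparison mirrors the code above, which can use == / != because the type factory canonicalizes types:

```java
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;

class CastTypes {
  // Hypothetical helper mirroring the guard in reduceCasts:
  // CAST((CAST x AS t) AS t NOT NULL) -> CAST(x AS t NOT NULL) is valid when
  // inner and outer types are equal ignoring nullability.
  static boolean sameIgnoringNullability(RelDataTypeFactory factory,
      RelDataType outer, RelDataType inner) {
    return factory.createTypeWithNullability(outer, true)
        == factory.createTypeWithNullability(inner, true); // canonicalized, so == is safe
  }
}
```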
TODO jvs 15-Dec-2008: consider + // similar cases for precision changes. + if (!(operands.get(0) instanceof RexCall)) { + return; + } + RexCall innerCast = (RexCall) operands.get(0); + if (innerCast.getOperator() != SqlStdOperatorTable.CAST) { + return; + } + if (innerCast.getOperands().size() != 1) { + return; + } + RelDataType outerTypeNullable = + typeFactory.createTypeWithNullability(outerCastType, true); + RelDataType innerTypeNullable = + typeFactory.createTypeWithNullability(operandType, true); + if (outerTypeNullable != innerTypeNullable) { + return; + } + if (operandType.isNullable()) { + removableCasts.add(innerCast); + } + } + + public Void visitDynamicParam(RexDynamicParam dynamicParam) { + return pushVariable(); + } + + public Void visitRangeRef(RexRangeRef rangeRef) { + return pushVariable(); + } + + public Void visitFieldAccess(RexFieldAccess fieldAccess) { + return pushVariable(); + } + } + + /** Shuttle that pushes predicates into a CASE. */ + protected static class CaseShuttle extends RexShuttle { + @Override public RexNode visitCall(RexCall call) { + for (;;) { + call = (RexCall) super.visitCall(call); + final RexCall old = call; + call = pushPredicateIntoCase(call); + if (call == old) { + return call; + } + } + } + } +} + +// End HiveReduceExpressionsRule.java diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java index 425514d..7d39d4b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java @@ -150,9 +150,37 @@ static ASTNode literal(RexLiteral literal, boolean useTypeQualInLiteral) { switch (sqlType) { case BINARY: + case DATE: + case TIME: + case TIMESTAMP: + case INTERVAL_YEAR_MONTH: + case INTERVAL_DAY_TIME: + if (literal.getValue() == null) { + return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node(); + } + break; + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + case DOUBLE: + case DECIMAL: + case FLOAT: + case REAL: + case VARCHAR: + case CHAR: + case BOOLEAN: + if (literal.getValue3() == null) { + return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node(); + } + } + + switch (sqlType) { + case BINARY: ByteString bs = (ByteString) literal.getValue(); - val = bs.byteAt(0); - type = HiveParser.BigintLiteral; + val = bs.toString(); + val = "'" + val + "'"; + type = HiveParser.StringLiteral; break; case TINYINT: if (useTypeQualInLiteral) { @@ -218,7 +246,7 @@ static ASTNode literal(RexLiteral literal, boolean useTypeQualInLiteral) { case TIMESTAMP: { val = literal.getValue(); type = HiveParser.TOK_TIMESTAMPLITERAL; - DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); val = df.format(((Calendar) val).getTime()); val = "'" + val + "'"; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java index a8b6300..35eb235 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java @@ -538,7 +538,7 @@ public ASTNode visitCall(RexCall call) { SqlOperator op = call.getOperator(); List astNodeLst = new LinkedList(); if (op.kind == SqlKind.CAST) { - HiveToken ht = 
TypeConverter.hiveToken(call.getType()); + HiveToken ht = TypeConverter.hiveToken(call); ASTBuilder astBldr = ASTBuilder.construct(ht.type, ht.text); if (ht.args != null) { for (String castArg : ht.args) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java index b42e78f..8aa9731 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java @@ -40,6 +40,7 @@ import org.apache.calcite.rex.RexWindow; import org.apache.calcite.rex.RexWindowBound; import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -129,10 +130,7 @@ public ExprNodeDesc visitCall(RexCall call) { } // If Call is a redundant cast then bail out. Ex: cast(true)BOOLEAN - if (call.isA(SqlKind.CAST) - && (call.operands.size() == 1) - && SqlTypeUtil.equalSansNullability(dTFactory, call.getType(), - call.operands.get(0).getType())) { + if (call.isA(SqlKind.CAST) && (call.operands.size() == 1)) { return args.get(0); } else { GenericUDF hiveUdf = SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java index 631a4ca..6fc8f9d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Map; +import org.apache.calcite.avatica.util.ByteString; import org.apache.calcite.avatica.util.TimeUnit; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.rel.RelNode; @@ -71,10 +72,12 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseNumeric; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFTimestamp; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToIntervalDayTime; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar; import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; @@ -112,6 +115,10 @@ private InputCtx(RelDataType calciteInpDataType, ImmutableMap h private final ImmutableList inputCtxs; private final boolean flattenExpr; + public RexNodeConverter(RelOptCluster cluster) { + this(cluster, new ArrayList(), false); + } + public RexNodeConverter(RelOptCluster cluster, RelDataType inpDataType, ImmutableMap nameToPosMap, int offset, boolean flattenExpr) { this.cluster = cluster; @@ -259,6 +266,9 @@ private RexNode handleExplicitCast(ExprNodeGenericFuncDesc func, List c GenericUDF udf = func.getGenericUDF(); if ((udf instanceof GenericUDFToChar) || (udf instanceof GenericUDFToVarchar) || (udf instanceof 
GenericUDFToDecimal) || (udf instanceof GenericUDFToDate) + // Calcite can not specify the scale for timestamp. As a result, all + // the millisecond part will be lost + || (udf instanceof GenericUDFTimestamp) || (udf instanceof GenericUDFToBinary) || castExprUsingUDFBridge(udf)) { castExpr = cluster.getRexBuilder().makeAbstractCast( TypeConverter.convert(func.getTypeInfo(), cluster.getTypeFactory()), @@ -321,6 +331,10 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx coi); RexNode calciteLiteral = null; + if (value == null) { + return cluster.getRexBuilder().makeLiteral(null, + cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true); + } // TODO: Verify if we need to use ConstantObjectInspector to unwrap data switch (hiveTypeCategory) { case BOOLEAN: @@ -378,6 +392,10 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), calciteDataType); break; case DOUBLE: + // TODO: The best solution is to support NaN in expression reduction. + if (Double.isInfinite((Double) value) || Double.isNaN((Double) value)) { + throw new CalciteSemanticException("Infinite or NaN", UnsupportedFeature.Invalid_decimal); + } calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), calciteDataType); break; case CHAR: @@ -417,19 +435,29 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticEx new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1,1))); break; case INTERVAL_DAY_TIME: + // Calcite RexBuilder L525 divides value by the multiplier. + // Need to get CAlCITE-1020 in. + throw new CalciteSemanticException("INTERVAL_DAY_TIME is not well supported", + UnsupportedFeature.Invalid_interval); // Calcite day-time interval is millis value as BigDecimal // Seconds converted to millis - BigDecimal secsValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getTotalSeconds() * 1000); - // Nanos converted to millis - BigDecimal nanosValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getNanos(), 6); - calciteLiteral = rexBuilder.makeIntervalLiteral(secsValueBd.add(nanosValueBd), - new SqlIntervalQualifier(TimeUnit.DAY, TimeUnit.SECOND, new SqlParserPos(1,1))); - break; + // BigDecimal secsValueBd = BigDecimal + // .valueOf(((HiveIntervalDayTime) value).getTotalSeconds() * 1000); + // // Nanos converted to millis + // BigDecimal nanosValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) + // value).getNanos(), 6); + // calciteLiteral = + // rexBuilder.makeIntervalLiteral(secsValueBd.add(nanosValueBd), + // new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, new + // SqlParserPos(1, 1))); + // break; case VOID: calciteLiteral = cluster.getRexBuilder().makeLiteral(null, cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true); break; case BINARY: + calciteLiteral = cluster.getRexBuilder().makeBinaryLiteral(new ByteString((byte[]) value)); + break; case UNKNOWN: default: throw new RuntimeException("UnSupported Literal"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java index 2825f77..eeeedf8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java @@ -28,6 +28,7 @@ import org.apache.calcite.rel.type.RelDataTypeFactory; import 
org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.sql.SqlIntervalQualifier; import org.apache.calcite.sql.type.SqlTypeName; @@ -321,9 +322,9 @@ public static TypeInfo convertPrimitiveType(RelDataType rType) { } /*********************** Convert Calcite Types To Hive Types ***********************/ - public static HiveToken hiveToken(RelDataType calciteType) { + public static HiveToken hiveToken(RexCall call) { HiveToken ht = null; - + RelDataType calciteType = call.getType(); switch (calciteType.getSqlTypeName()) { case CHAR: { ht = new HiveToken(HiveParser.TOK_CHAR, "TOK_CHAR", String.valueOf(calciteType.getPrecision())); @@ -343,6 +344,14 @@ public static HiveToken hiveToken(RelDataType calciteType) { .getPrecision()), String.valueOf(calciteType.getScale())); } break; + case BINARY: { + if (call.getOperands().get(0).getType().getSqlTypeName().getName().equals("BINARY")) { + ht = new HiveToken(HiveParser.Identifier, "unhex"); + } else { + ht = new HiveToken(HiveParser.Identifier, "binary"); + } + } + break; default: ht = calciteToHiveTypeNameMap.get(calciteType.getSqlTypeName().getName()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index 5e43adc..54200d3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -40,6 +40,7 @@ import org.antlr.runtime.tree.TreeVisitorAction; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelOptPlanner.Executor; import org.apache.calcite.plan.RelOptQuery; import org.apache.calcite.plan.RelOptRule; import org.apache.calcite.plan.RelOptSchema; @@ -117,6 +118,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveDefaultRelMetadataProvider; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveHepPlannerContext; +import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveVolcanoPlannerContext; import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; @@ -151,6 +153,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePreFilteringRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectMergeRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectSortTransposeRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRelFieldTrimmer; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortJoinReduceRule; @@ -871,9 +874,12 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu // Create MD provider HiveDefaultRelMetadataProvider mdProvider = new HiveDefaultRelMetadataProvider(conf); + // Create executor + Executor executorProvider = new HiveRexExecutorImpl(cluster); + // 2. Apply pre-join order optimizations calcitePreCboPlan = applyPreJoinOrderingTransforms(calciteGenPlan, - mdProvider.getMetadataProvider()); + mdProvider.getMetadataProvider(), executorProvider); // 3. 
Apply join order optimizations: reordering MST algorithm // If join optimizations failed because of missing stats, we continue with @@ -1022,9 +1028,11 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu * original plan * @param mdProvider * meta data provider + * @param executorProvider + * executor * @return */ - private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProvider mdProvider) { + private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProvider mdProvider, Executor executorProvider) { // TODO: Decorelation of subquery should be done before attempting // Partition Pruning; otherwise Expression evaluation may try to execute // corelated sub query. @@ -1105,6 +1113,12 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv basePlan = hepPlan(basePlan, true, mdProvider, SemiJoinJoinTransposeRule.INSTANCE, SemiJoinFilterTransposeRule.INSTANCE, SemiJoinProjectTransposeRule.INSTANCE); + // Constant folding needs to be run after Transitive inference & Partition + // Pruning and Projection Pruning. + basePlan = hepPlan(basePlan, true, mdProvider, executorProvider, + HiveReduceExpressionsRule.PROJECT_INSTANCE, HiveReduceExpressionsRule.FILTER_INSTANCE, + HiveReduceExpressionsRule.JOIN_INSTANCE); + // 9. Apply Partition Pruning basePlan = hepPlan(basePlan, false, mdProvider, new HivePartitionPruneRule(conf)); @@ -1143,7 +1157,23 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv */ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadataProvider mdProvider, RelOptRule... rules) { - return hepPlan(basePlan, followPlanChanges, mdProvider, + return hepPlan(basePlan, followPlanChanges, mdProvider, null, + HepMatchOrder.TOP_DOWN, rules); + } + + /** + * Run the HEP Planner with the given rule set. + * + * @param basePlan + * @param followPlanChanges + * @param mdProvider + * @param executorProvider + * @param rules + * @return optimized RelNode + */ + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, + RelMetadataProvider mdProvider, Executor executorProvider, RelOptRule... rules) { + return hepPlan(basePlan, followPlanChanges, mdProvider, executorProvider, HepMatchOrder.TOP_DOWN, rules); } @@ -1157,8 +1187,8 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, * @param rules * @return optimized RelNode */ - private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadataProvider mdProvider, - HepMatchOrder order, RelOptRule... rules) { + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, + RelMetadataProvider mdProvider, HepMatchOrder order, RelOptRule... rules) { RelNode optimizedRelNode = basePlan; HepProgramBuilder programBuilder = new HepProgramBuilder(); @@ -1181,10 +1211,52 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadata basePlan.getCluster().setMetadataProvider( new CachingRelMetadataProvider(chainedProvider, planner)); - // Executor is required for constant-reduction rules; see [CALCITE-566] - final RexExecutorImpl executor = - new RexExecutorImpl(Schemas.createDataContext(null)); - basePlan.getCluster().getPlanner().setExecutor(executor); + planner.setRoot(basePlan); + optimizedRelNode = planner.findBestExp(); + + return optimizedRelNode; + } + + /** + * Run the HEP Planner with the given rule set. 
+ * + * @param basePlan + * @param followPlanChanges + * @param mdProvider + * @param executorProvider + * @param order + * @param rules + * @return optimized RelNode + */ + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, + RelMetadataProvider mdProvider, Executor executorProvider, HepMatchOrder order, + RelOptRule... rules) { + + RelNode optimizedRelNode = basePlan; + HepProgramBuilder programBuilder = new HepProgramBuilder(); + if (followPlanChanges) { + programBuilder.addMatchOrder(order); + programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules)); + } else { + // TODO: Should this be also TOP_DOWN? + for (RelOptRule r : rules) + programBuilder.addRuleInstance(r); + } + + HiveRulesRegistry registry = new HiveRulesRegistry(); + HiveHepPlannerContext context = new HiveHepPlannerContext(registry); + HepPlanner planner = new HepPlanner(programBuilder.build(), context); + + List list = Lists.newArrayList(); + list.add(mdProvider); + planner.registerMetadataProviders(list); + RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); + basePlan.getCluster().setMetadataProvider( + new CachingRelMetadataProvider(chainedProvider, planner)); + + if (executorProvider != null) { + basePlan.getCluster().getPlanner().setExecutor(executorProvider); + } planner.setRoot(basePlan); optimizedRelNode = planner.findBestExp(); diff --git a/ql/src/test/queries/clientpositive/cbo_const.q b/ql/src/test/queries/clientpositive/cbo_const.q new file mode 100644 index 0000000..02c561d --- /dev/null +++ b/ql/src/test/queries/clientpositive/cbo_const.q @@ -0,0 +1,51 @@ +set hive.cbo.enable=true; + +select + interval_day_time('2 1:2:3'), + interval_day_time(cast('2 1:2:3' as string)), + interval_day_time(cast('2 1:2:3' as varchar(10))), + interval_day_time(cast('2 1:2:3' as char(10))), + interval_day_time('2 1:2:3') = interval '2 1:2:3' day to second +from src limit 1; + +select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'; + +drop view t1; + +create table t1_new (key string, value string) partitioned by (ds string); + +insert overwrite table t1_new partition (ds = '2011-10-15') +select 'key1', 'value1' from src tablesample (1 rows); + +insert overwrite table t1_new partition (ds = '2011-10-16') +select 'key2', 'value2' from src tablesample (1 rows); + +create view t1 partitioned on (ds) as +select * from +( +select key, value, ds from t1_new +union all +select key, value, ds from t1_new +)subq; + +select * from t1 where ds = '2011-10-15'; + + +explain select array(1,2,3) from src; + +EXPLAIN +select key from (SELECT key from src where key = 1+3)s; + +select * from (select key from src where key = '1')subq; + +select '1'; + +select * from (select '1')subq; + +select * from (select key from src where false)subq; + +EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3); + diff --git a/ql/src/test/queries/clientpositive/constantfolding.q b/ql/src/test/queries/clientpositive/constantfolding.q new file mode 100644 index 0000000..e3a1814 --- /dev/null +++ b/ql/src/test/queries/clientpositive/constantfolding.q @@ -0,0 +1,87 @@ +set hive.optimize.ppd=true; + +-- SORT_QUERY_RESULTS + +select * from (select 'k2' as key, '1 ' as value from src limit 2)b +union all +select * from (select 'k3' as key, '' as value from src limit 2)b +union all +select * from 
+
+
+drop table if exists union_all_bug_test_1;
+drop table if exists union_all_bug_test_2;
+create table if not exists union_all_bug_test_1
+(
+f1 int,
+f2 int
+);
+
+create table if not exists union_all_bug_test_2
+(
+f1 int
+);
+
+insert into table union_all_bug_test_1 values (1,1);
+insert into table union_all_bug_test_2 values (1);
+insert into table union_all_bug_test_1 values (0,0);
+insert into table union_all_bug_test_2 values (0);
+
+
+
+SELECT f1
+FROM (
+
+SELECT
+f1
+, if('helloworld' like '%hello%' ,f1,f2) as filter
+FROM union_all_bug_test_1
+
+union all
+
+select
+f1
+, 0 as filter
+from union_all_bug_test_2
+) A
+WHERE (filter = 1 and f1 = 1);
+
+
+select percentile(cast(key as bigint), array()) from src where false;
+
+select unbase64("0xe23") from src;
+
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3;
+
+select null from src;
+
+-- numRows: 2 rawDataSize: 80
+explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src;
+
+-- numRows: 2 rawDataSize: 112
+explain select cast("1970-12-31 15:59:58.174" as DATE) from src;
+
+CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+
+FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86;
+
+EXPLAIN
+SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
+  LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1),
+  ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12),
+  LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12),
+  POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5),
+  POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)),
+  POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
+  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1;
+
+SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
+  LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1),
+  ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12),
+  LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12),
+  POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5),
+  POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)),
+  POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
+  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1;
diff --git a/ql/src/test/results/clientpositive/cast1.q.out b/ql/src/test/results/clientpositive/cast1.q.out
index 0bdecba..48a0c14 100644
--- a/ql/src/test/results/clientpositive/cast1.q.out
+++ b/ql/src/test/results/clientpositive/cast1.q.out
@@ -105,11 +105,11 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION []
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 EXPRESSION []
-POSTHOOK: Lineage: dest1.c5 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Lineage: dest1.c2 SIMPLE []
+POSTHOOK: Lineage: dest1.c3 SIMPLE []
+POSTHOOK: Lineage: dest1.c4 SIMPLE []
+POSTHOOK: Lineage: dest1.c5 SIMPLE []
 POSTHOOK: Lineage: dest1.c6 EXPRESSION []
 POSTHOOK: Lineage: dest1.c7 EXPRESSION []
 PREHOOK: query: select dest1.* FROM dest1
diff --git a/ql/src/test/results/clientpositive/cbo_const.q.out b/ql/src/test/results/clientpositive/cbo_const.q.out
new file mode 100644
index 0000000..c91af50
--- /dev/null
+++
b/ql/src/test/results/clientpositive/cbo_const.q.out @@ -0,0 +1,334 @@ +PREHOOK: query: select + interval_day_time('2 1:2:3'), + interval_day_time(cast('2 1:2:3' as string)), + interval_day_time(cast('2 1:2:3' as varchar(10))), + interval_day_time(cast('2 1:2:3' as char(10))), + interval_day_time('2 1:2:3') = interval '2 1:2:3' day to second +from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select + interval_day_time('2 1:2:3'), + interval_day_time(cast('2 1:2:3' as string)), + interval_day_time(cast('2 1:2:3' as varchar(10))), + interval_day_time(cast('2 1:2:3' as char(10))), + interval_day_time('2 1:2:3') = interval '2 1:2:3' day to second +from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +2 01:02:03.000000000 2 01:02:03.000000000 2 01:02:03.000000000 2 01:02:03.000000000 true +PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: drop view t1 +PREHOOK: type: DROPVIEW +POSTHOOK: query: drop view t1 +POSTHOOK: type: DROPVIEW +PREHOOK: query: create table t1_new (key string, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1_new +POSTHOOK: query: create table t1_new (key string, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1_new +PREHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-15') +select 'key1', 'value1' from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t1_new@ds=2011-10-15 +POSTHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-15') +select 'key1', 'value1' from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t1_new@ds=2011-10-15 +POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-15).key SIMPLE [] +POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-15).value SIMPLE [] +PREHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-16') +select 'key2', 'value2' from src tablesample (1 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@t1_new@ds=2011-10-16 +POSTHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-16') +select 'key2', 'value2' from src tablesample (1 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@t1_new@ds=2011-10-16 +POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-16).key SIMPLE [] +POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-16).value SIMPLE [] 
+PREHOOK: query: create view t1 partitioned on (ds) as +select * from +( +select key, value, ds from t1_new +union all +select key, value, ds from t1_new +)subq +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1_new +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create view t1 partitioned on (ds) as +select * from +( +select key, value, ds from t1_new +union all +select key, value, ds from t1_new +)subq +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t1_new +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: select * from t1 where ds = '2011-10-15' +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_new +PREHOOK: Input: default@t1_new@ds=2011-10-15 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 where ds = '2011-10-15' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_new +POSTHOOK: Input: default@t1_new@ds=2011-10-15 +#### A masked pattern was here #### +key1 value1 2011-10-15 +key1 value1 2011-10-15 +PREHOOK: query: explain select array(1,2,3) from src +PREHOOK: type: QUERY +POSTHOOK: query: explain select array(1,2,3) from src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(1,2,3) (type: array) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN +select key from (SELECT key from src where key = 1+3)s +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +select key from (SELECT key from src where key = 1+3)s +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(key) = 4) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '4' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select key from src where key = '1')subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key from src where key = '1')subq +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@src +#### A masked pattern was here #### +PREHOOK: query: select '1' +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: select * from (select '1')subq +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select * from (select '1')subq +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: select * from (select key from src where false)subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key from src where false)subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +PREHOOK: query: EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT x.key, z.value, y.value +FROM src1 x JOIN src y ON (x.key = y.key and y.key = 1+2) +JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11+3) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: y + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(key) = 3) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: string) + outputColumnNames: _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '3' (type: string) + sort order: + + Map-reduce partition columns: '3' (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + TableScan + alias: x + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((UDFToDouble(key) = 3) and value is not null) (type: boolean) + Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: string) + outputColumnNames: _col1 + Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '3' (type: string) + sort order: + + Map-reduce partition columns: '3' (type: string) + Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col1, _col3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + 
key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + TableScan + alias: z + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (((UDFToDouble(hr) = 14) and (ds = '2008-04-08')) and value is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: value (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col3 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col1, _col4 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '3' (type: string), _col4 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out index b14caa8..df26fbb 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out @@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"15","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"}]} 15 15 15 diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out b/ql/src/test/results/clientpositive/constantfolding.q.out new file mode 100644 index 0000000..6b5325b --- /dev/null +++ b/ql/src/test/results/clientpositive/constantfolding.q.out @@ -0,0 +1,1303 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +select * from (select 'k2' as key, '1 ' as value from src limit 2)b +union all +select * from (select 'k3' as key, '' as value from src limit 2)b +union all +select * from (select 'k4' as key, ' ' as value from src limit 2)c +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select * from (select 'k2' as key, '1 ' as value from src limit 2)b +union all +select * from 
(select 'k3' as key, '' as value from src limit 2)b +union all +select * from (select 'k4' as key, ' ' as value from src limit 2)c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +k2 1 +k2 1 +k3 +k3 +k4 +k4 +PREHOOK: query: drop table if exists union_all_bug_test_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists union_all_bug_test_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists union_all_bug_test_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists union_all_bug_test_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists union_all_bug_test_1 +( +f1 int, +f2 int +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: query: create table if not exists union_all_bug_test_1 +( +f1 int, +f2 int +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@union_all_bug_test_1 +PREHOOK: query: create table if not exists union_all_bug_test_2 +( +f1 int +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: query: create table if not exists union_all_bug_test_2 +( +f1 int +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@union_all_bug_test_2 +PREHOOK: query: insert into table union_all_bug_test_1 values (1,1) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: query: insert into table union_all_bug_test_1 values (1,1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: Lineage: union_all_bug_test_1.f1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: union_all_bug_test_1.f2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into table union_all_bug_test_2 values (1) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: query: insert into table union_all_bug_test_2 values (1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: Lineage: union_all_bug_test_2.f1 EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table union_all_bug_test_1 values (0,0) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: query: insert into table union_all_bug_test_1 values (0,0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@union_all_bug_test_1 +POSTHOOK: Lineage: union_all_bug_test_1.f1 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: union_all_bug_test_1.f2 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into table union_all_bug_test_2 values (0) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: query: insert into table union_all_bug_test_2 values (0) 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@union_all_bug_test_2 +POSTHOOK: Lineage: union_all_bug_test_2.f1 EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: SELECT f1 +FROM ( + +SELECT +f1 +, if('helloworld' like '%hello%' ,f1,f2) as filter +FROM union_all_bug_test_1 + +union all + +select +f1 +, 0 as filter +from union_all_bug_test_2 +) A +WHERE (filter = 1 and f1 = 1) +PREHOOK: type: QUERY +PREHOOK: Input: default@union_all_bug_test_1 +PREHOOK: Input: default@union_all_bug_test_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT f1 +FROM ( + +SELECT +f1 +, if('helloworld' like '%hello%' ,f1,f2) as filter +FROM union_all_bug_test_1 + +union all + +select +f1 +, 0 as filter +from union_all_bug_test_2 +) A +WHERE (filter = 1 and f1 = 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_all_bug_test_1 +POSTHOOK: Input: default@union_all_bug_test_2 +#### A masked pattern was here #### +1 +PREHOOK: query: select percentile(cast(key as bigint), array()) from src where false +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select percentile(cast(key as bigint), array()) from src where false +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +NULL +PREHOOK: query: select unbase64("0xe23") from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select unbase64("0xe23") from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� 
+�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +�� +PREHOOK: query: SELECT key,randum123, h4 +FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a +WHERE a.h4 <= 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key,randum123, h4 +FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a +WHERE a.h4 <= 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +PREHOOK: query: select null from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select null from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL 
+NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: -- numRows: 2 rawDataSize: 80 +explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src +PREHOOK: type: QUERY +POSTHOOK: query: -- numRows: 2 rawDataSize: 80 +explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 1970-12-31 15:59:58.174 (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: -- numRows: 2 rawDataSize: 112 +explain select cast("1970-12-31 15:59:58.174" as DATE) from src +PREHOOK: type: QUERY +POSTHOOK: query: -- numRows: 2 rawDataSize: 112 +explain select cast("1970-12-31 15:59:58.174" as DATE) from src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: null (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + ListSink + +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dest1 +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dest1 +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dest1 +POSTHOOK: Lineage: dest1.c1 SIMPLE [] +PREHOOK: query: EXPLAIN +SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) 
FROM dest1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: dest1 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 1.098612288668 (type: double), null (type: void), null (type: void), 1.098612288668 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), 0.47712125472 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), null (type: void), -1.0 (type: double), 7.389056098931 (type: double), 8.0 (type: double), 8.0 (type: double), 0.125 (type: double), 8.0 (type: double), 2.0 (type: double), NaN (type: double), 1.0 (type: double), 1.0 (type: double), 8.0 (type: double), 8.0 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27 + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), + LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), + ROUND(LOG10(3.0),12), LOG10(0.0), LOG10(-1), ROUND(LOG(2, 3.0),12), + LOG(2, 0.0), LOG(2, -1), LOG(0.5, 2), LOG(2, 0.5), ROUND(EXP(2.0),12), + POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), + POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), + POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +1.098612288668 NULL NULL 1.098612288668 NULL NULL 1.584962500721 NULL NULL 0.47712125472 NULL NULL 1.584962500721 NULL NULL NULL -1.0 7.389056098931 8.0 8.0 0.125 8.0 2.0 NaN 1.0 1.0 8.0 8.0 diff --git a/ql/src/test/results/clientpositive/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/cross_product_check_1.q.out index e7d6900..f1f261a 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_1.q.out @@ -326,8 +326,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[10][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[9][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 
join B d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 diff --git a/ql/src/test/results/clientpositive/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cross_product_check_2.q.out index df438c9..e5780c6 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_2.q.out @@ -323,8 +323,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-5:MAPRED' is a cross product -Warning: Map Join MAPJOIN[29][bigTable=?] in task 'Stage-3:MAPRED' is a cross product +Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Stage-5:MAPRED' is a cross product +Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out index 24ac550..141bcd8 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out @@ -1560,7 +1560,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1642,8 +1642,9 @@ group by "day", key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 STAGE PLANS: Stage: Stage-1 @@ -1665,7 +1666,7 @@ STAGE PLANS: Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ - Map-reduce partition columns: _col0 (type: string) + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: @@ -1676,17 +1677,39 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: 
default.hive13_dp1 + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col2 (type: string) + sort order: + + Map-reduce partition columns: _col2 (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 Stage: Stage-0 Move Operator @@ -1700,7 +1723,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.hive13_dp1 - Stage: Stage-2 + Stage: Stage-3 Stats-Aggr Operator PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out index 6164a26..415e34f 100644 --- a/ql/src/test/results/clientpositive/groupby_ppd.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out @@ -33,11 +33,11 @@ STAGE PLANS: Union Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 1 (type: int), _col1 (type: int) - outputColumnNames: _col0, _col1 + expressions: _col1 (type: int) + outputColumnNames: _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int) + keys: 1 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -59,11 +59,11 @@ STAGE PLANS: Union Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 1 (type: int), _col1 (type: int) - outputColumnNames: _col0, _col1 + expressions: _col1 (type: int) + outputColumnNames: _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int) + keys: 1 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -79,7 +79,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col1 (type: int), _col0 (type: int) + expressions: _col1 (type: int), 1 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/input8.q.out b/ql/src/test/results/clientpositive/input8.q.out index 03857fc..f40e271 100644 --- a/ql/src/test/results/clientpositive/input8.q.out +++ b/ql/src/test/results/clientpositive/input8.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: src1 
Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: null (type: double), UDFToInteger((UDFToDouble(key) - null)) (type: int), null (type: double) + expressions: null (type: void), UDFToInteger((UDFToDouble(key) - null)) (type: int), null (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -106,7 +106,7 @@ INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [] +POSTHOOK: Lineage: dest1.c1 SIMPLE [] POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: dest1.c3 EXPRESSION [] PREHOOK: query: SELECT dest1.* FROM dest1 diff --git a/ql/src/test/results/clientpositive/input_part1.q.out b/ql/src/test/results/clientpositive/input_part1.q.out index d6f4d3e..c5c46af 100644 --- a/ql/src/test/results/clientpositive/input_part1.q.out +++ b/ql/src/test/results/clientpositive/input_part1.q.out @@ -365,8 +365,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: dest1.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: dest1.ds SIMPLE [] +POSTHOOK: Lineage: dest1.hr SIMPLE [] POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: SELECT dest1.* FROM dest1 diff --git a/ql/src/test/results/clientpositive/input_part5.q.out b/ql/src/test/results/clientpositive/input_part5.q.out index f2d7335..c6ae2fd 100644 --- a/ql/src/test/results/clientpositive/input_part5.q.out +++ b/ql/src/test/results/clientpositive/input_part5.q.out @@ -114,7 +114,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@tmptable POSTHOOK: Lineage: tmptable.ds SIMPLE [(srcpart)x.FieldSchema(name:hr, type:string, comment:null), ] -POSTHOOK: Lineage: tmptable.hr SIMPLE [(srcpart)x.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: tmptable.hr SIMPLE [] POSTHOOK: Lineage: tmptable.key SIMPLE [(srcpart)x.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tmptable.value SIMPLE [(srcpart)x.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select * from tmptable x sort by x.key,x.value,x.ds,x.hr diff --git a/ql/src/test/results/clientpositive/input_part6.q.out b/ql/src/test/results/clientpositive/input_part6.q.out index fa51cdf..c01d8af 100644 --- a/ql/src/test/results/clientpositive/input_part6.q.out +++ b/ql/src/test/results/clientpositive/input_part6.q.out @@ -19,7 +19,7 @@ STAGE PLANS: predicate: (UDFToDouble(ds) = 1996.0) (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), '1996' (type: string), hr (type: string) + expressions: key (type: string), value (type: string), '1996.0' (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out index ec8b76b..a189f82 100644 --- a/ql/src/test/results/clientpositive/lineage2.q.out +++ b/ql/src/test/results/clientpositive/lineage2.q.out @@ -402,7 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"15","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]} 15 15 15 diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out index 747dc9a..a1b0267 100644 --- a/ql/src/test/results/clientpositive/lineage3.q.out +++ b/ql/src/test/results/clientpositive/lineage3.q.out @@ -196,7 +196,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is 
null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 369 401 val_401 406 val_406 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out index be77ba8..059ff87 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out @@ -690,12 +690,10 @@ STAGE PLANS: predicate: (x = 484) (type: boolean) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 484 (type: int) - outputColumnNames: _col0 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col0 (type: int) + keys: 484 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out index 79348f3..0f8167b 100644 --- a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out @@ -1519,7 +1519,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) @@ -1635,7 +1635,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out index 17d527b..c31365c 100644 --- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out @@ -1613,6 +1613,21 @@ STAGE PLANS: sort order: + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + 
outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: UDFToDouble(hr) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Target column: hr + Target Vertex: Map 1 Execution mode: llap Reducer 2 Execution mode: llap @@ -1984,20 +1999,16 @@ STAGE PLANS: alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Execution mode: llap Reducer 2 Execution mode: llap @@ -3031,6 +3042,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[5]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -3050,6 +3062,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[6]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -4553,20 +4566,16 @@ STAGE PLANS: alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Execution mode: llap Reducer 3 Execution mode: llap @@ -5262,6 +5271,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[5]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -5281,6 +5291,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[6]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -5544,6 +5555,21 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 54 Basic stats: 
COMPLETE Column stats: NONE Target column: hr Target Vertex: Map 1 + Select Operator + expressions: UDFToDouble(_col2) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart_orc + Partition key expr: UDFToDouble(hr) + Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Target column: hr + Target Vertex: Map 1 Execution mode: llap Reducer 2 Execution mode: uber diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out index 5fbc4f6..65a5943 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out @@ -1613,6 +1613,21 @@ STAGE PLANS: sort order: + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: UDFToDouble(hr) + Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE + Target column: hr + Target Vertex: Map 1 Execution mode: vectorized, llap Reducer 2 Execution mode: llap @@ -1984,20 +1999,16 @@ STAGE PLANS: alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Execution mode: llap Reducer 2 Execution mode: llap @@ -3031,6 +3042,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[5]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -3050,6 +3062,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[6]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -4553,20 +4566,16 @@ STAGE PLANS: alias: srcpart 
filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: '2008-04-08' (type: string) - outputColumnNames: ds + Group By Operator + keys: '2008-04-08' (type: string) + mode: hash + outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: ds (type: string) - mode: hash - outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Execution mode: llap Reducer 3 Execution mode: vectorized, llap @@ -5262,6 +5271,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[5]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -5281,6 +5291,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart + filterExpr: (ds) IN (RS[6]) (type: boolean) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) @@ -5544,6 +5555,21 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 720 Basic stats: COMPLETE Column stats: NONE Target column: hr Target Vertex: Map 1 + Select Operator + expressions: UDFToDouble(_col2) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart_orc + Partition key expr: UDFToDouble(hr) + Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE + Target column: hr + Target Vertex: Map 1 Execution mode: llap Reducer 2 Execution mode: uber diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out index f6d8388..90032fe 100644 --- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out @@ -768,23 +768,23 @@ STAGE PLANS: alias: orc_pred Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Reduce Output 
Operator key expressions: _col3 (type: string) sort order: - - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE @@ -834,26 +834,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_pred - filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) + filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col3 (type: string) sort order: - - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out index b322ef1..7c5be6d 100644 --- a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out @@ -756,23 +756,23 @@ STAGE PLANS: alias: tbl_pred Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 
300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col3 (type: string) sort order: - - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 33 Basic stats: COMPLETE Column stats: NONE @@ -822,26 +822,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_pred - filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) + filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean) - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean) + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col3 (type: string) sort order: - - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double) Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 Statistics: Num rows: 3 Data size: 33 Basic 
stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/partition_multilevels.q.out b/ql/src/test/results/clientpositive/partition_multilevels.q.out index c1c8778..c30f1b9 100644 --- a/ql/src/test/results/clientpositive/partition_multilevels.q.out +++ b/ql/src/test/results/clientpositive/partition_multilevels.q.out @@ -990,12 +990,12 @@ STAGE PLANS: alias: partition_test_multilevel Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: '2222' (type: string), level2 (type: string), level3 (type: string) - outputColumnNames: level1, level2, level3 + expressions: level2 (type: string), level3 (type: string) + outputColumnNames: _col1, _col2 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: level1 (type: string), level2 (type: string), level3 (type: string) + keys: '2222' (type: string), _col1 (type: string), _col2 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE @@ -1578,12 +1578,12 @@ STAGE PLANS: alias: partition_test_multilevel Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: '2222' (type: string), level2 (type: string), level3 (type: string) - outputColumnNames: level1, level2, level3 + expressions: level2 (type: string), level3 (type: string) + outputColumnNames: _col1, _col2 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: level1 (type: string), level2 (type: string), level3 (type: string) + keys: '2222' (type: string), _col1 (type: string), _col2 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/pointlookup2.q.out b/ql/src/test/results/clientpositive/pointlookup2.q.out index a442425..1d7efe8 100644 --- a/ql/src/test/results/clientpositive/pointlookup2.q.out +++ b/ql/src/test/results/clientpositive/pointlookup2.q.out @@ -68,7 +68,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_t1 POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 POSTHOOK: Output: default@pcr_t2 -POSTHOOK: Lineage: pcr_t2.ds SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: pcr_t2.ds SIMPLE [] POSTHOOK: Lineage: pcr_t2.key SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: from pcr_t1 @@ -83,8 +83,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_t1 POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 POSTHOOK: Output: default@pcr_t2 -POSTHOOK: Lineage: pcr_t2.ds SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: pcr_t2.key SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: pcr_t2.ds SIMPLE [] +POSTHOOK: Lineage: pcr_t2.key SIMPLE [] POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain extended select key, value, ds diff --git a/ql/src/test/results/clientpositive/ppd_constant_expr.q.out b/ql/src/test/results/clientpositive/ppd_constant_expr.q.out index 17e2bab..55ec4a2 100644 --- 
a/ql/src/test/results/clientpositive/ppd_constant_expr.q.out +++ b/ql/src/test/results/clientpositive/ppd_constant_expr.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: null (type: double), UDFToInteger((UDFToDouble(key) - null)) (type: int), null (type: double) + expressions: null (type: void), UDFToInteger((UDFToDouble(key) - null)) (type: int), null (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -106,7 +106,7 @@ INSERT OVERWRITE TABLE ppd_constant_expr SELECT 4 + NULL, src1.key - NULL, NULL POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 POSTHOOK: Output: default@ppd_constant_expr -POSTHOOK: Lineage: ppd_constant_expr.c1 EXPRESSION [] +POSTHOOK: Lineage: ppd_constant_expr.c1 SIMPLE [] POSTHOOK: Lineage: ppd_constant_expr.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: ppd_constant_expr.c3 EXPRESSION [] PREHOOK: query: SELECT ppd_constant_expr.* FROM ppd_constant_expr @@ -168,7 +168,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: null (type: double), UDFToInteger((UDFToDouble(key) - null)) (type: int), null (type: double) + expressions: null (type: void), UDFToInteger((UDFToDouble(key) - null)) (type: int), null (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -242,7 +242,7 @@ INSERT OVERWRITE TABLE ppd_constant_expr SELECT 4 + NULL, src1.key - NULL, NULL POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 POSTHOOK: Output: default@ppd_constant_expr -POSTHOOK: Lineage: ppd_constant_expr.c1 EXPRESSION [] +POSTHOOK: Lineage: ppd_constant_expr.c1 SIMPLE [] POSTHOOK: Lineage: ppd_constant_expr.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: ppd_constant_expr.c3 EXPRESSION [] PREHOOK: query: SELECT ppd_constant_expr.* FROM ppd_constant_expr diff --git a/ql/src/test/results/clientpositive/quotedid_basic.q.out b/ql/src/test/results/clientpositive/quotedid_basic.q.out index 519f647..cae7f22 100644 --- a/ql/src/test/results/clientpositive/quotedid_basic.q.out +++ b/ql/src/test/results/clientpositive/quotedid_basic.q.out @@ -187,27 +187,27 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col2 (type: string), _col1 (type: string) + key expressions: '1' (type: string), _col1 (type: string) sort order: ++ - Map-reduce partition columns: _col2 (type: string) + Map-reduce partition columns: '1' (type: string) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: string) Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: string, _col2: string + output shape: _col0: string, _col1: string type: WINDOWING Windowing table definition input alias: ptf_1 name: 
windowingtablefunction order by: _col1 - partition by: _col2 + partition by: '1' raw input shape: window functions: window function definition @@ -219,7 +219,7 @@ STAGE PLANS: isPivotResult: true Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), rank_window_0 (type: int) + expressions: _col0 (type: string), _col1 (type: string), '1' (type: string), rank_window_0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator @@ -291,27 +291,27 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col2 (type: string), _col1 (type: string) + key expressions: '1' (type: string), _col1 (type: string) sort order: ++ - Map-reduce partition columns: _col2 (type: string) + Map-reduce partition columns: '1' (type: string) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: string) Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string) - outputColumnNames: _col0, _col1, _col2 + expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: string, _col2: string + output shape: _col0: string, _col1: string type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction order by: _col1 - partition by: _col2 + partition by: '1' raw input shape: window functions: window function definition @@ -323,7 +323,7 @@ STAGE PLANS: isPivotResult: true Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), rank_window_0 (type: int) + expressions: _col0 (type: string), _col1 (type: string), '1' (type: string), rank_window_0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out index b5e7846..57a89d6 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out @@ -1567,7 +1567,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) @@ -1688,7 +1688,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE 
[(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out index 5b03dcf..de37a15 100644 --- a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out @@ -324,8 +324,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[10][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 4' is a cross product -Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product +Warning: Shuffle Join JOIN[9][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 4' is a cross product +Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out index 93c502d..54b073e 100644 --- a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out @@ -339,8 +339,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Stage-1:MAPRED' is a cross product -Warning: Map Join MAPJOIN[25][bigTable=?] in task 'Stage-2:MAPRED' is a cross product +Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Stage-1:MAPRED' is a cross product +Warning: Map Join MAPJOIN[24][bigTable=?] 
in task 'Stage-2:MAPRED' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 diff --git a/ql/src/test/results/clientpositive/spark/vector_elt.q.out b/ql/src/test/results/clientpositive/spark/vector_elt.q.out index bb66867..b27798a 100644 --- a/ql/src/test/results/clientpositive/spark/vector_elt.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_elt.q.out @@ -84,7 +84,7 @@ STAGE PLANS: TableScan alias: alltypesorc Select Operator - expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string) + expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: void), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 Limit Number of rows: 1 diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out index 296c256..c5ddfb0 100644 --- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out @@ -110,8 +110,8 @@ STAGE PLANS: condition map: Left Semi Join 0 to 1 keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), 1 (type: int) + 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col1, _col2 input vertices: 1 Map 2 diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out index ed86079..53ad0ca 100644 --- a/ql/src/test/results/clientpositive/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin.q.out @@ -569,7 +569,7 @@ Manufacturer#4 almond azure aquamarine papaya violet 12 Manufacturer#5 almond antique blue firebrick mint 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -Warning: Shuffle Join JOIN[39][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- agg, non corr explain select p_name, p_size @@ -851,7 +851,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[39][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_name, p_size from part where part.p_size not in @@ -898,7 +898,7 @@ almond aquamarine sandy cyan gainsboro 18 almond aquamarine yellow dodger mint 7 almond azure aquamarine papaya violet 12 almond azure blanched chiffon midnight 23 -Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- agg, corr explain select p_mfgr, p_name, p_size @@ -1212,7 +1212,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[38][tables = 
[$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out index 7b7ccda..a1c18a2 100644 --- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out +++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out @@ -737,7 +737,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out index 9582334..df82393 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out @@ -1481,7 +1481,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) @@ -1594,7 +1594,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 POSTHOOK: Output: default@tab@ds=2008-04-08 -POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [] POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain select count(*) diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out index 0e7c681..28d54a9 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out @@ -324,8 +324,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[25][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 diff --git 
a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out index efd8b5d..ccb8000 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out @@ -301,8 +301,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Map 2' is a cross product -Warning: Map Join MAPJOIN[25][bigTable=?] in task 'Reducer 3' is a cross product +Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Map 2' is a cross product +Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Reducer 3' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out index 346e52f..8ef2b06 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out @@ -1630,7 +1630,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1724,6 +1724,7 @@ STAGE PLANS: Tez Edges: Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -1744,7 +1745,7 @@ STAGE PLANS: Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ - Map-reduce partition columns: _col0 (type: string) + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reducer 2 @@ -1757,17 +1758,30 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false + Reduce Output Operator + key expressions: _col2 (type: string) + sort order: + + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.hive13_dp1 + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) + Reducer 3 + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), 
VALUE._col1 (type: int), VALUE._col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 Stage: Stage-2 Dependency Collection diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out index c081309..24c81ef 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out @@ -2981,61 +2981,61 @@ Stage-0 limit:-1 Stage-1 Reducer 2 - File Output Operator [FS_15] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_14] + Select Operator [SEL_13] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_17] + Filter Operator [FIL_16] predicate:_col3 is null (type: boolean) Statistics:Num rows: 1 Data size: 269 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator [MERGEJOIN_19] + Merge Join Operator [MERGEJOIN_18] | condition map:[{"":"Left Outer Join0 to 1"}] | keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"} | outputColumnNames:["_col0","_col1","_col3"] | Statistics:Num rows: 193 Data size: 51917 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - | Reduce Output Operator [RS_10] + | Reduce Output Operator [RS_9] | key expressions:_col1 (type: string) | Map-reduce partition columns:_col1 (type: string) | sort order:+ | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE | value expressions:_col0 (type: string) - | Select Operator [SEL_2] + | Select Operator [SEL_1] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE | TableScan [TS_0] | alias:b | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 4 [SIMPLE_EDGE] - Reduce Output Operator [RS_11] + Reduce Output Operator [RS_10] key expressions:_col1 (type: string) Map-reduce partition columns:_col1 (type: string) sort order:+ Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_9] + Select Operator [SEL_8] outputColumnNames:["_col1"] Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_8] + Group By Operator [GBY_7] | keys:KEY._col0 (type: string), KEY._col1 (type: string) | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_7] + Reduce Output Operator [RS_6] key expressions:_col0 (type: string), _col1 (type: string) Map-reduce partition columns:_col0 (type: string), _col1 (type: string) sort order:++ Statistics:Num rows: 83 Data size: 14774 Basic stats: 
COMPLETE Column stats: COMPLETE - Group By Operator [GBY_6] + Group By Operator [GBY_5] keys:key (type: string), value (type: string) outputColumnNames:["_col0","_col1"] Statistics:Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_18] + Filter Operator [FIL_17] predicate:(value > 'val_2') (type: boolean) Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_3] + TableScan [TS_2] alias:b Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE @@ -3068,57 +3068,57 @@ Stage-0 limit:-1 Stage-1 Reducer 3 - File Output Operator [FS_15] + File Output Operator [FS_14] compressed:false Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_14] + Select Operator [SEL_13] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator [FIL_17] + Filter Operator [FIL_16] predicate:_col3 is null (type: boolean) Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator [MERGEJOIN_19] + Merge Join Operator [MERGEJOIN_18] | condition map:[{"":"Left Outer Join0 to 1"}] | keys:{"0":"_col1 (type: string), _col0 (type: string)","1":"_col0 (type: string), _col1 (type: string)"} | outputColumnNames:["_col0","_col1","_col3"] | Statistics:Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 4 [SIMPLE_EDGE] - | Reduce Output Operator [RS_11] + | Reduce Output Operator [RS_10] | key expressions:_col0 (type: string), _col1 (type: string) | Map-reduce partition columns:_col0 (type: string), _col1 (type: string) | sort order:++ | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | Select Operator [SEL_9] + | Select Operator [SEL_8] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_18] + | Filter Operator [FIL_17] | predicate:(value > 'val_12') (type: boolean) | Statistics:Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_7] + | TableScan [TS_6] | alias:b | Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 2 [SIMPLE_EDGE] - Reduce Output Operator [RS_10] + Reduce Output Operator [RS_9] key expressions:_col1 (type: string), _col0 (type: string) Map-reduce partition columns:_col1 (type: string), _col0 (type: string) sort order:++ Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_5] + Group By Operator [GBY_4] | keys:KEY._col0 (type: string), KEY._col1 (type: string) | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 1 [SIMPLE_EDGE] - Reduce Output Operator [RS_4] + Reduce Output Operator [RS_3] key expressions:_col0 (type: string), _col1 (type: string) Map-reduce partition columns:_col0 (type: string), _col1 (type: string) sort order:++ Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_3] + Group By Operator [GBY_2] keys:key (type: string), value (type: string) 
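The wholesale renumbering running through explainuser_1.q.out (SEL_2 becoming SEL_1, RS_10 becoming RS_9, GBY_8 becoming GBY_7, and so on) is a side effect of sequential operator numbering rather than a plan-shape change in its own right: once folding eliminates one operator early in planning, every ID handed out after it shifts down by one. A toy sketch of such a counter-based labeling scheme (hypothetical, not Hive's actual ID assignment code):

import java.util.ArrayList;
import java.util.List;

/** Toy sketch: labels like SEL_1 or RS_2 come from one running counter,
 *  so removing a single operator before numbering shifts every later ID. */
final class OperatorNumberingSketch {
    static List<String> label(List<String> kinds) {
        List<String> out = new ArrayList<>();
        int next = 0;
        for (String kind : kinds) {
            out.add(kind + "_" + next++);  // sequential, plan-wide counter
        }
        return out;
    }

    public static void main(String[] args) {
        // Before: a redundant Select survives planning.
        System.out.println(label(List.of("TS", "SEL", "SEL", "RS", "GBY")));
        // [TS_0, SEL_1, SEL_2, RS_3, GBY_4]

        // After: constant folding removes it, so all later IDs drop by one.
        System.out.println(label(List.of("TS", "SEL", "RS", "GBY")));
        // [TS_0, SEL_1, RS_2, GBY_3]
    }
}

The same mechanism explains the Warning lines earlier in this patch, where JOIN[39] becomes JOIN[38] and MAPJOIN[25] becomes MAPJOIN[24] without the joins themselves changing.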
outputColumnNames:["_col0","_col1"] Statistics:Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator [SEL_2] + Select Operator [SEL_1] outputColumnNames:["key","value"] Statistics:Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE TableScan [TS_0] @@ -3953,113 +3953,113 @@ Stage-0 limit:-1 Stage-1 Reducer 4 - File Output Operator [FS_37] + File Output Operator [FS_36] compressed:false Statistics:Num rows: 15 Data size: 1966 Basic stats: COMPLETE Column stats: NONE table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"} - Select Operator [SEL_36] + Select Operator [SEL_35] | outputColumnNames:["_col0","_col1"] | Statistics:Num rows: 15 Data size: 1966 Basic stats: COMPLETE Column stats: NONE |<-Reducer 3 [SIMPLE_EDGE] - Reduce Output Operator [RS_35] + Reduce Output Operator [RS_34] key expressions:_col0 (type: string) sort order:+ Statistics:Num rows: 15 Data size: 1966 Basic stats: COMPLETE Column stats: NONE value expressions:_col1 (type: int) - Select Operator [SEL_34] + Select Operator [SEL_33] outputColumnNames:["_col0","_col1"] Statistics:Num rows: 15 Data size: 1966 Basic stats: COMPLETE Column stats: NONE - Filter Operator [FIL_40] + Filter Operator [FIL_39] predicate:_col3 is null (type: boolean) Statistics:Num rows: 15 Data size: 1966 Basic stats: COMPLETE Column stats: NONE - Merge Join Operator [MERGEJOIN_47] + Merge Join Operator [MERGEJOIN_46] | condition map:[{"":"Left Outer Join0 to 1"}] | keys:{"0":"UDFToDouble(_col1) (type: double)","1":"_col0 (type: double)"} | outputColumnNames:["_col0","_col1","_col3"] | Statistics:Num rows: 30 Data size: 3932 Basic stats: COMPLETE Column stats: NONE |<-Reducer 2 [SIMPLE_EDGE] - | Reduce Output Operator [RS_30] + | Reduce Output Operator [RS_29] | key expressions:UDFToDouble(_col1) (type: double) | Map-reduce partition columns:UDFToDouble(_col1) (type: double) | sort order:+ | Statistics:Num rows: 28 Data size: 3575 Basic stats: COMPLETE Column stats: NONE | value expressions:_col0 (type: string), _col1 (type: int) - | Merge Join Operator [MERGEJOIN_46] + | Merge Join Operator [MERGEJOIN_45] | | condition map:[{"":"Inner Join 0 to 1"}] | | keys:{} | | outputColumnNames:["_col0","_col1"] | | Statistics:Num rows: 28 Data size: 3575 Basic stats: COMPLETE Column stats: NONE | |<-Map 1 [SIMPLE_EDGE] - | | Reduce Output Operator [RS_27] + | | Reduce Output Operator [RS_26] | | sort order: | | Statistics:Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE | | value expressions:_col0 (type: string), _col1 (type: int) - | | Select Operator [SEL_2] + | | Select Operator [SEL_1] | | outputColumnNames:["_col0","_col1"] | | Statistics:Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE | | TableScan [TS_0] | | alias:part | | Statistics:Num rows: 26 Data size: 3250 Basic stats: COMPLETE Column stats: COMPLETE | |<-Reducer 6 [SIMPLE_EDGE] - | Reduce Output Operator [RS_28] + | Reduce Output Operator [RS_27] | sort order: | Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - | Select Operator [SEL_16] + | Select Operator [SEL_15] | Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - | Filter Operator [FIL_41] + | Filter Operator [FIL_40] | predicate:(_col0 = 0) (type: boolean) | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE - | Group By Operator [GBY_15] + | Group By Operator [GBY_14] | aggregations:["count()"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - | Select Operator [SEL_9] + | Select Operator [SEL_8] | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - | Filter Operator [FIL_42] + | Filter Operator [FIL_41] | predicate:_col0 is null (type: boolean) | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - | Group By Operator [GBY_8] + | Group By Operator [GBY_7] | | aggregations:["avg(VALUE._col0)"] | | outputColumnNames:["_col0"] | | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE | |<-Map 5 [SIMPLE_EDGE] - | Reduce Output Operator [RS_7] + | Reduce Output Operator [RS_6] | sort order: | Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE | value expressions:_col0 (type: struct) - | Group By Operator [GBY_6] + | Group By Operator [GBY_5] | aggregations:["avg(p_size)"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - | Filter Operator [FIL_43] + | Filter Operator [FIL_42] | predicate:(p_size < 10) (type: boolean) | Statistics:Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - | TableScan [TS_3] + | TableScan [TS_2] | alias:part | Statistics:Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE |<-Reducer 8 [SIMPLE_EDGE] - Reduce Output Operator [RS_31] + Reduce Output Operator [RS_30] key expressions:_col0 (type: double) Map-reduce partition columns:_col0 (type: double) sort order:+ Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator [GBY_24] + Group By Operator [GBY_23] | aggregations:["avg(VALUE._col0)"] | outputColumnNames:["_col0"] | Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE |<-Map 7 [SIMPLE_EDGE] - Reduce Output Operator [RS_23] + Reduce Output Operator [RS_22] sort order: Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE value expressions:_col0 (type: struct) - Group By Operator [GBY_22] + Group By Operator [GBY_21] aggregations:["avg(p_size)"] outputColumnNames:["_col0"] Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - Filter Operator [FIL_45] + Filter Operator [FIL_44] predicate:(p_size < 10) (type: boolean) Statistics:Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - TableScan [TS_19] + TableScan [TS_18] alias:part Statistics:Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/tez/mergejoin.q.out b/ql/src/test/results/clientpositive/tez/mergejoin.q.out index b2b3872..5b41385 100644 --- a/ql/src/test/results/clientpositive/tez/mergejoin.q.out +++ b/ql/src/test/results/clientpositive/tez/mergejoin.q.out @@ -1472,6 +1472,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b + filterExpr: (key) IN (RS[4]) (type: boolean) Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) @@ -1564,6 +1565,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a + filterExpr: (key) IN (RS[5]) (type: boolean) Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) diff --git 
a/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out index 9a5d047..5bc04d7 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_round.q.out @@ -118,7 +118,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + key expressions: round(_col0, -1) (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,0)) @@ -268,7 +268,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + key expressions: round(_col0, -1) (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,0)) @@ -419,7 +419,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + key expressions: round(_col0, -1) (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,0)) diff --git a/ql/src/test/results/clientpositive/tez/vector_elt.q.out b/ql/src/test/results/clientpositive/tez/vector_elt.q.out index bb66867..b27798a 100644 --- a/ql/src/test/results/clientpositive/tez/vector_elt.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_elt.q.out @@ -84,7 +84,7 @@ STAGE PLANS: TableScan alias: alltypesorc Select Operator - expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string) + expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: void), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 Limit Number of rows: 1 diff --git a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out index 7eb28f8..3dfc90b 100644 --- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out @@ -50,8 +50,8 @@ STAGE PLANS: condition map: Left Semi Join 0 to 1 keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), 1 (type: int) + 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col1, _col2 input vertices: 1 Map 2 diff --git a/ql/src/test/results/clientpositive/udf1.q.out b/ql/src/test/results/clientpositive/udf1.q.out index dffbccf..b3b694b 100644 --- a/ql/src/test/results/clientpositive/udf1.q.out +++ b/ql/src/test/results/clientpositive/udf1.q.out @@ -137,26 +137,26 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 
EXPRESSION [] -POSTHOOK: Lineage: dest1.c10 EXPRESSION [] -POSTHOOK: Lineage: dest1.c11 EXPRESSION [] -POSTHOOK: Lineage: dest1.c12 EXPRESSION [] -POSTHOOK: Lineage: dest1.c13 EXPRESSION [] -POSTHOOK: Lineage: dest1.c14 EXPRESSION [] -POSTHOOK: Lineage: dest1.c15 EXPRESSION [] -POSTHOOK: Lineage: dest1.c16 EXPRESSION [] -POSTHOOK: Lineage: dest1.c17 EXPRESSION [] -POSTHOOK: Lineage: dest1.c18 EXPRESSION [] -POSTHOOK: Lineage: dest1.c19 EXPRESSION [] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [] -POSTHOOK: Lineage: dest1.c20 EXPRESSION [] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [] +POSTHOOK: Lineage: dest1.c1 SIMPLE [] +POSTHOOK: Lineage: dest1.c10 SIMPLE [] +POSTHOOK: Lineage: dest1.c11 SIMPLE [] +POSTHOOK: Lineage: dest1.c12 SIMPLE [] +POSTHOOK: Lineage: dest1.c13 SIMPLE [] +POSTHOOK: Lineage: dest1.c14 SIMPLE [] +POSTHOOK: Lineage: dest1.c15 SIMPLE [] +POSTHOOK: Lineage: dest1.c16 SIMPLE [] +POSTHOOK: Lineage: dest1.c17 SIMPLE [] +POSTHOOK: Lineage: dest1.c18 SIMPLE [] +POSTHOOK: Lineage: dest1.c19 SIMPLE [] +POSTHOOK: Lineage: dest1.c2 SIMPLE [] +POSTHOOK: Lineage: dest1.c20 SIMPLE [] +POSTHOOK: Lineage: dest1.c3 SIMPLE [] +POSTHOOK: Lineage: dest1.c4 SIMPLE [] +POSTHOOK: Lineage: dest1.c5 SIMPLE [] +POSTHOOK: Lineage: dest1.c6 SIMPLE [] +POSTHOOK: Lineage: dest1.c7 SIMPLE [] +POSTHOOK: Lineage: dest1.c8 SIMPLE [] +POSTHOOK: Lineage: dest1.c9 SIMPLE [] PREHOOK: query: SELECT dest1.* FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 diff --git a/ql/src/test/results/clientpositive/udf_10_trims.q.out b/ql/src/test/results/clientpositive/udf_10_trims.q.out index 2f79723..3a5303a 100644 --- a/ql/src/test/results/clientpositive/udf_10_trims.q.out +++ b/ql/src/test/results/clientpositive/udf_10_trims.q.out @@ -117,4 +117,4 @@ WHERE src.key = 86 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [] +POSTHOOK: Lineage: dest1.c1 SIMPLE [] diff --git a/ql/src/test/results/clientpositive/udf_hour.q.out b/ql/src/test/results/clientpositive/udf_hour.q.out index cf7f78b..4eb5a00 100644 --- a/ql/src/test/results/clientpositive/udf_hour.q.out +++ b/ql/src/test/results/clientpositive/udf_hour.q.out @@ -39,7 +39,7 @@ STAGE PLANS: predicate: (UDFToDouble(key) = 86.0) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 13 (type: int), 13 (type: int), null (type: int) + expressions: 13 (type: int), 13 (type: int), null (type: void) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink diff --git a/ql/src/test/results/clientpositive/udf_minute.q.out b/ql/src/test/results/clientpositive/udf_minute.q.out index 3af42c4..ebd07c5 100644 --- a/ql/src/test/results/clientpositive/udf_minute.q.out +++ b/ql/src/test/results/clientpositive/udf_minute.q.out @@ -39,7 +39,7 @@ STAGE PLANS: predicate: (UDFToDouble(key) = 86.0) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 14 (type: int), 14 (type: int), null (type: int) + expressions: 14 (type: int), 14 (type: int), null (type: void) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data 
size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/udf_parse_url.q.out b/ql/src/test/results/clientpositive/udf_parse_url.q.out index 5ea8f40..f657fa9 100644 --- a/ql/src/test/results/clientpositive/udf_parse_url.q.out +++ b/ql/src/test/results/clientpositive/udf_parse_url.q.out @@ -60,7 +60,7 @@ STAGE PLANS: predicate: (UDFToDouble(key) = 86.0) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 'facebook.com' (type: string), '/path1/p.php' (type: string), 'k1=v1&k2=v2' (type: string), 'Ref1' (type: string), 'v2' (type: string), 'v1' (type: string), null (type: string), '/path1/p.php?k1=v1&k2=v2' (type: string), 'http' (type: string), null (type: string), 'facebook.com' (type: string) + expressions: 'facebook.com' (type: string), '/path1/p.php' (type: string), 'k1=v1&k2=v2' (type: string), 'Ref1' (type: string), 'v2' (type: string), 'v1' (type: string), null (type: void), '/path1/p.php?k1=v1&k2=v2' (type: string), 'http' (type: string), null (type: void), 'facebook.com' (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/udf_second.q.out b/ql/src/test/results/clientpositive/udf_second.q.out index 4ee919a..fcd1143 100644 --- a/ql/src/test/results/clientpositive/udf_second.q.out +++ b/ql/src/test/results/clientpositive/udf_second.q.out @@ -39,7 +39,7 @@ STAGE PLANS: predicate: (UDFToDouble(key) = 86.0) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 15 (type: int), 15 (type: int), null (type: int) + expressions: 15 (type: int), 15 (type: int), null (type: void) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/vector_decimal_round.q.out index 25e5cfa..ec6226e 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out @@ -106,7 +106,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + key expressions: round(_col0, -1) (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,0)) @@ -242,7 +242,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + key expressions: round(_col0, -1) (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: decimal(10,0)) @@ -379,7 +379,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: round(_col0, (- 1)) (type: decimal(11,0)) + key expressions: round(_col0, -1) (type: decimal(11,0)) sort order: + Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: NONE value expressions: _col0 (type: decimal(10,0)) diff --git a/ql/src/test/results/clientpositive/vector_elt.q.out b/ql/src/test/results/clientpositive/vector_elt.q.out index e3fa9ed..3a2c1fc 100644 --- a/ql/src/test/results/clientpositive/vector_elt.q.out +++ b/ql/src/test/results/clientpositive/vector_elt.q.out @@ -102,7 +102,7 @@ STAGE PLANS: alias: alltypesorc Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string) + expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: void), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 12288 Data size: 8687616 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out index dd40f28..4026a5c 100644 --- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out +++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out @@ -232,8 +232,8 @@ STAGE PLANS: condition map: Left Semi Join 0 to 1 keys: - 0 _col0 (type: int) - 1 _col0 (type: int) + 0 _col0 (type: int), 1 (type: int) + 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col1, _col2 Statistics: Num rows: 55 Data size: 6598 Basic stats: COMPLETE Column stats: NONE File Output Operator
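Several of the golden files above (vector_elt.q.out, udf_hour.q.out, udf_parse_url.q.out, ppd_constant_expr.q.out) now print null (type: void) where the old plans showed a null already coerced to string, int, or double: after folding, a bare NULL literal keeps Hive's untyped void type until something forces a concrete type on it. A minimal sketch of that idea, with a hypothetical record rather than Hive's TypeInfo machinery:

/** Hypothetical sketch, not Hive's TypeInfo machinery: a bare NULL
 *  literal carries the "void" type; only an explicit coercion pins it
 *  to a concrete type such as int or string. */
final class TypedNullSketch {
    record Literal(Object value, String type) {
        static Literal nullLiteral() {
            return new Literal(null, "void"); // folded NULL stays untyped
        }
        Literal castTo(String target) {
            return new Literal(value, target); // coercion fixes the type
        }
        @Override public String toString() {
            return value + " (type: " + type + ")";
        }
    }

    public static void main(String[] args) {
        Literal n = Literal.nullLiteral();
        System.out.println(n);               // null (type: void)
        System.out.println(n.castTo("int")); // null (type: int)
    }
}

The round(_col0, (- 1)) expressions collapsing to round(_col0, -1) in the vector_decimal_round files are the same folding applied to a unary minus over a literal, and the semi-join keys in vector_mapjoin_reduce gaining a literal column, 0 _col0 (type: int), 1 (type: int), show folded constants surfacing directly in join key lists as well.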