diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelFactories.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelFactories.java index cf93ed8..75e0f07 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelFactories.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelFactories.java @@ -45,6 +45,7 @@ import org.apache.calcite.util.ImmutableBitSet; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin; @@ -199,10 +200,15 @@ public RelNode createAggregate(RelNode child, boolean indicator, private static class HiveSetOpFactoryImpl implements SetOpFactory { @Override public RelNode createSetOp(SqlKind kind, List<RelNode> inputs, boolean all) { - if (kind != SqlKind.UNION) { - throw new IllegalStateException("Expected to get Set operator of type Union. Found : " + kind); + if (kind == SqlKind.UNION) { + return new HiveUnion(inputs.get(0).getCluster(), inputs.get(0).getTraitSet(), inputs); + } else if (kind == SqlKind.INTERSECT) { + return new HiveIntersect(inputs.get(0).getCluster(), inputs.get(0).getTraitSet(), inputs, + all); + } else { + throw new IllegalStateException("Expected to get Set operator of type Union or Intersect. Found : " + + kind); } - return new HiveUnion(inputs.get(0).getCluster(), inputs.get(0).getTraitSet(), inputs); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveIntersect.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveIntersect.java new file mode 100644 index 0000000..3d06f13 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveIntersect.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators; + +import java.util.List; + +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelTraitSet; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Intersect; +import org.apache.calcite.rel.core.SetOp; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode.Implementor; + +public class HiveIntersect extends Intersect { + + public HiveIntersect(RelOptCluster cluster, RelTraitSet traits, List<RelNode> inputs, boolean all) { + super(cluster, traits, inputs, all); + } + + @Override + public SetOp copy(RelTraitSet traitSet, List<RelNode> inputs, boolean all) { + return new HiveIntersect(this.getCluster(), traitSet, inputs, all); + } + + public void implement(Implementor implementor) { + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectMergeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectMergeRule.java new file mode 100644 index 0000000..a551841 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectMergeRule.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.rel.RelNode; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect; +import org.apache.calcite.util.Util; + +/** + * Planner rule that merges multiple + * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect} operators into a single HiveIntersect with the combined inputs. + */ +public class HiveIntersectMergeRule extends RelOptRule { + + public static final HiveIntersectMergeRule INSTANCE = new HiveIntersectMergeRule(); + + // ~ Constructors ----------------------------------------------------------- + + private HiveIntersectMergeRule() { + super( + operand(HiveIntersect.class, operand(RelNode.class, any()), operand(RelNode.class, any()))); + } + + // ~ Methods ---------------------------------------------------------------- + + public void onMatch(RelOptRuleCall call) { + final HiveIntersect topHiveIntersect = call.rel(0); + + final HiveIntersect bottomHiveIntersect; + if (call.rel(2) instanceof HiveIntersect) { + bottomHiveIntersect = call.rel(2); + } else if (call.rel(1) instanceof HiveIntersect) { + bottomHiveIntersect = call.rel(1); + } else { + return; + } + + boolean all = topHiveIntersect.all; + if (all != bottomHiveIntersect.all) { + return; + } + + List<RelNode> inputs = new ArrayList<>(); + if (call.rel(2) instanceof HiveIntersect) { + assert topHiveIntersect.getInputs().size() == 2; + inputs.add(topHiveIntersect.getInput(0)); + inputs.addAll(bottomHiveIntersect.getInputs()); + } else { + inputs.addAll(bottomHiveIntersect.getInputs()); + inputs.addAll(Util.skip(topHiveIntersect.getInputs())); + } + + HiveIntersect newIntersect = (HiveIntersect) topHiveIntersect.copy( + topHiveIntersect.getTraitSet(), inputs, all); + call.transformTo(newIntersect); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectRewriteRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectRewriteRule.java new file mode 100644 index 0000000..0a27d51 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectRewriteRule.java @@ -0,0 +1,218 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.List; + +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Aggregate; +import org.apache.calcite.rel.core.AggregateCall; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.sql.SqlAggFunction; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlOperator; +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException; +import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion; +import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter; +import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.calcite.tools.RelBuilder; +import org.apache.calcite.util.ImmutableBitSet; +import org.apache.calcite.util.Util; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; +import com.google.common.collect.Lists; + +/** + * Planner rule that rewrites + * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect} + * where all = false (i.e., INTERSECT DISTINCT). + */ +public class HiveIntersectRewriteRule extends RelOptRule { + + public static final HiveIntersectRewriteRule INSTANCE = new HiveIntersectRewriteRule(); + + // ~ Constructors ----------------------------------------------------------- + + private HiveIntersectRewriteRule() { + super(operand(HiveIntersect.class, any())); + } + + // ~ Methods ---------------------------------------------------------------- + + public void onMatch(RelOptRuleCall call) { + final HiveIntersect hiveIntersect = call.rel(0); + + // TODO: support intersect all + if (hiveIntersect.all) { + return; + } + + final RelOptCluster cluster = hiveIntersect.getCluster(); + final RexBuilder rexBuilder = cluster.getRexBuilder(); + int numOfBranch = hiveIntersect.getInputs().size(); + Builder<RelNode> bldr = new ImmutableList.Builder<RelNode>(); + + // 1st level GB: create a GB (col0, col1, count(1) as c) for each branch + for (int index = 0; index < numOfBranch; index++) { + RelNode input = hiveIntersect.getInputs().get(index); + final List<RexNode> gbChildProjLst = Lists.newArrayList(); + final List<Integer> groupSetPositions = Lists.newArrayList(); + for (int cInd = 0; cInd < input.getRowType().getFieldList().size(); cInd++) { + gbChildProjLst.add(rexBuilder.makeInputRef(input, cInd)); + groupSetPositions.add(cInd); + } +
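For orientation, the rewrite that onMatch assembles for a two-branch intersect such as the intersect.q query "select * from a intersect distinct select * from b" is roughly equivalent to the SQL below. This is an illustrative sketch only, not output generated by the patch; the derived-table alias u is arbitrary:

select key, value
from (select key, value, count(1) as c from a group by key, value
      union all
      select key, value, count(1) as c from b group by key, value) u
group by key, value
having count(c) = 2;

Here count(c) counts how many branches produced a given (key, value) group, so keeping only groups with count(c) = 2 yields the distinct rows common to both sides. With more than two branches (after HiveIntersectMergeRule has flattened nested intersects into a single HiveIntersect), the same shape is built with one per-branch aggregate per input, and the final filter becomes count(c) = numOfBranch, which appears as the "predicate: (_col2 = 4)" Filter Operator in the four-way explain output further below.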
gbChildProjLst.add(rexBuilder.makeBigintLiteral(new BigDecimal(1))); + + // create the project before GB + RelNode gbInputRel = null; + try { + gbInputRel = HiveProject.create(input, gbChildProjLst, null); + } catch (CalciteSemanticException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + // groupSetPosition includes all the positions + final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions); + + List aggregateCalls = Lists.newArrayList(); + RelDataType aggFnRetType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, + cluster.getTypeFactory()); + ImmutableList.Builder aggArgRelDTBldr = new ImmutableList.Builder(); + SqlAggFunction aggFunction = SqlFunctionConverter.getCalciteAggFn("count", false, + aggArgRelDTBldr.build(), aggFnRetType); + List argList = new ArrayList(); + // the last "1" is the argument as we are count(1) as c. + argList.add(input.getRowType().getFieldList().size()); + AggregateCall aggregateCall = new AggregateCall(aggFunction, false, argList, aggFnRetType, + null); + aggregateCalls.add(aggregateCall); + HiveRelNode aggregateRel = new HiveAggregate(cluster, + cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, false, groupSet, null, + aggregateCalls); + bldr.add(aggregateRel); + } + + // create a union above all the branches + HiveRelNode union = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build()); + + // 2nd level GB: create a GB (col0, col1, count(c)) for each branch + final List gbChildProjLst = Lists.newArrayList(); + final List groupSetPositions = Lists.newArrayList(); + // the last one is c + int lastInd = union.getRowType().getFieldList().size() - 1; + for (int cInd = 0; cInd < union.getRowType().getFieldList().size(); cInd++) { + gbChildProjLst.add(rexBuilder.makeInputRef(union, cInd)); + if (cInd != lastInd) { + groupSetPositions.add(cInd); + } + } + RelNode gbInputRel = null; + try { + gbInputRel = HiveProject.create(union, gbChildProjLst, null); + } catch (CalciteSemanticException e2) { + // TODO Auto-generated catch block + e2.printStackTrace(); + } + + List aggregateCalls = Lists.newArrayList(); + RelDataType aggFnRetType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, + cluster.getTypeFactory()); + ImmutableList.Builder aggArgRelDTBldr = new ImmutableList.Builder(); + SqlAggFunction aggFunction = SqlFunctionConverter.getCalciteAggFn("count", false, + aggArgRelDTBldr.build(), aggFnRetType); + List argList = new ArrayList(); + argList.add(lastInd); + AggregateCall aggregateCall = new AggregateCall(aggFunction, false, argList, aggFnRetType, null); + aggregateCalls.add(aggregateCall); + + final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions); + HiveRelNode aggregateRel = new HiveAggregate(cluster, + cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, false, groupSet, null, aggregateCalls); + + // add a filter count(c) = #branches + List childRexNodeLst = new ArrayList(); + RexInputRef ref = rexBuilder.makeInputRef(aggregateRel, lastInd); + RexLiteral literal = rexBuilder.makeBigintLiteral(new BigDecimal(numOfBranch)); + childRexNodeLst.add(ref); + childRexNodeLst.add(literal); + ImmutableList.Builder calciteArgTypesBldr = new ImmutableList.Builder(); + calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, + cluster.getTypeFactory())); + calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, + cluster.getTypeFactory())); + // deterministic = false? 
+ RexNode factoredFilterExpr = null; + try { + factoredFilterExpr = rexBuilder + .makeCall( + SqlFunctionConverter.getCalciteFn("=", calciteArgTypesBldr.build(), + TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()), + false), childRexNodeLst); + } catch (CalciteSemanticException e1) { + // TODO Auto-generated catch block + e1.printStackTrace(); + } + + RelNode filterRel = new HiveFilter(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), + aggregateRel, factoredFilterExpr); + + // finally add a project to project out the last column + List originalInputRefs = Lists.transform(filterRel.getRowType().getFieldList(), + new Function() { + @Override + public RexNode apply(RelDataTypeField input) { + return new RexInputRef(input.getIndex(), input.getType()); + } + }); + List copyInputRefs = new ArrayList<>(); + for (int i = 0; i < originalInputRefs.size() - 1; i++) { + copyInputRefs.add(originalInputRefs.get(i)); + } + RelNode srcRel = null; + try { + srcRel = HiveProject.create(filterRel, copyInputRefs, null); + } catch (CalciteSemanticException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + call.transformTo(srcRel); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java index 9f5e733..6d49bbe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java @@ -300,8 +300,8 @@ private Schema getRowSchema(String tblAlias) { } private QueryBlockInfo convertSource(RelNode r) throws CalciteSemanticException { - Schema s; - ASTNode ast; + Schema s = null; + ASTNode ast = null; if (r instanceof TableScan) { TableScan f = (TableScan) r; @@ -333,19 +333,15 @@ private QueryBlockInfo convertSource(RelNode r) throws CalciteSemanticException s = left.schema; } } else if (r instanceof Union) { - RelNode leftInput = ((Union) r).getInput(0); - RelNode rightInput = ((Union) r).getInput(1); - - ASTConverter leftConv = new ASTConverter(leftInput, this.derivedTableCount); - ASTConverter rightConv = new ASTConverter(rightInput, this.derivedTableCount); - ASTNode leftAST = leftConv.convert(); - ASTNode rightAST = rightConv.convert(); - - ASTNode unionAST = getUnionAllAST(leftAST, rightAST); - - String sqAlias = nextAlias(); - ast = ASTBuilder.subQuery(unionAST, sqAlias); - s = new Schema((Union) r, sqAlias); + Union u = ((Union) r); + ASTNode left = new ASTConverter(((Union) r).getInput(0), this.derivedTableCount).convert(); + for (int ind = 1; ind < u.getInputs().size(); ind++) { + left = getUnionAllAST(left, new ASTConverter(((Union) r).getInput(ind), + this.derivedTableCount).convert()); + String sqAlias = nextAlias(); + ast = ASTBuilder.subQuery(left, sqAlias); + s = new Schema((Union) r, sqAlias); + } } else { ASTConverter src = new ASTConverter(r, this.derivedTableCount); ASTNode srcAST = src.convert(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index ff94160..f591f36 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -63,6 +63,7 @@ import org.apache.calcite.rel.core.Filter; import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rel.core.TableScan; +import 
org.apache.calcite.rel.core.SetOp; import org.apache.calcite.rel.metadata.CachingRelMetadataProvider; import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataProvider; @@ -139,6 +140,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode; @@ -157,6 +159,8 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterSetOpTransposeRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterSortTransposeRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveInsertExchange4JoinRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveIntersectMergeRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveIntersectRewriteRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinAddNotNullRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinCommuteRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinProjectTransposeRule; @@ -192,6 +196,7 @@ import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionSpec; +import org.apache.hadoop.hive.ql.parse.QBExpr.Opcode; import org.apache.hadoop.hive.ql.parse.QBSubQuery.SubQueryType; import org.apache.hadoop.hive.ql.parse.WindowingSpec.BoundarySpec; import org.apache.hadoop.hive.ql.parse.WindowingSpec.RangeBoundarySpec; @@ -1151,6 +1156,20 @@ private RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataProv final int maxCNFNodeCount = conf.getIntVar(HiveConf.ConfVars.HIVE_CBO_CNF_NODES_LIMIT); final int minNumORClauses = conf.getIntVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN); + //0. SetOp rewrite + + perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER); + basePlan = hepPlan(basePlan, false, mdProvider, null, HepMatchOrder.BOTTOM_UP, + HiveIntersectMergeRule.INSTANCE); + perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, + "Calcite: HiveIntersectMerge rule"); + + perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER); + basePlan = hepPlan(basePlan, false, mdProvider, null, HepMatchOrder.BOTTOM_UP, + HiveIntersectRewriteRule.INSTANCE); + perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, + "Calcite: HiveIntersectRewrite rule"); + //1. Distinct aggregate rewrite // Run this optimization early, since it is expanding the operator pipeline. if (!conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("mr") && @@ -1343,18 +1362,16 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, } @SuppressWarnings("nls") - private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode leftRel, + private RelNode genSetOpLogicalPlan(Opcode opcode, String alias, String leftalias, RelNode leftRel, String rightalias, RelNode rightRel) throws SemanticException { - HiveUnion unionRel = null; - // 1. 
Get Row Resolvers, Column map for original left and right input of - // Union Rel + // SetOp Rel RowResolver leftRR = this.relToHiveRR.get(leftRel); RowResolver rightRR = this.relToHiveRR.get(rightRel); HashMap leftmap = leftRR.getFieldMap(leftalias); HashMap rightmap = rightRR.getFieldMap(rightalias); - // 2. Validate that Union is feasible according to Hive (by using type + // 2. Validate that SetOp is feasible according to Hive (by using type // info from RR) if (leftmap.size() != rightmap.size()) { throw new SemanticException("Schema of both sides of union should match."); @@ -1363,8 +1380,8 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode ASTNode tabref = getQB().getAliases().isEmpty() ? null : getQB().getParseInfo() .getSrcForAlias(getQB().getAliases().get(0)); - // 3. construct Union Output RR using original left & right Input - RowResolver unionoutRR = new RowResolver(); + // 3. construct SetOp Output RR using original left & right Input + RowResolver setOpOutRR = new RowResolver(); Iterator> lIter = leftmap.entrySet().iterator(); Iterator> rIter = rightmap.entrySet().iterator(); @@ -1380,18 +1397,18 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode rInfo.getType()); if (commonTypeInfo == null) { throw new SemanticException(generateErrorMessage(tabref, - "Schema of both sides of union should match: Column " + field + "Schema of both sides of setop should match: Column " + field + " is of type " + lInfo.getType().getTypeName() + " on first table and type " + rInfo.getType().getTypeName() + " on second table")); } - ColumnInfo unionColInfo = new ColumnInfo(lInfo); - unionColInfo.setType(commonTypeInfo); - unionoutRR.put(unionalias, field, unionColInfo); + ColumnInfo setOpColInfo = new ColumnInfo(lInfo); + setOpColInfo.setType(commonTypeInfo); + setOpOutRR.put(alias, field, setOpColInfo); } // 4. Determine which columns requires cast on left/right input (Calcite - // requires exact types on both sides of union) + // requires exact types on both sides of SetOp) boolean leftNeedsTypeCast = false; boolean rightNeedsTypeCast = false; List leftProjs = new ArrayList(); @@ -1406,7 +1423,7 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode leftFieldDT = leftRowDT.get(i).getType(); rightFieldDT = rightRowDT.get(i).getType(); if (!leftFieldDT.equals(rightFieldDT)) { - unionFieldDT = TypeConverter.convert(unionoutRR.getColumnInfos().get(i).getType(), + unionFieldDT = TypeConverter.convert(setOpOutRR.getColumnInfos().get(i).getType(), cluster.getTypeFactory()); if (!unionFieldDT.equals(leftFieldDT)) { leftNeedsTypeCast = true; @@ -1429,28 +1446,41 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode // 5. Introduce Project Rel above original left/right inputs if cast is // needed for type parity - RelNode unionLeftInput = leftRel; - RelNode unionRightInput = rightRel; + RelNode setOpLeftInput = leftRel; + RelNode setOpRightInput = rightRel; if (leftNeedsTypeCast) { - unionLeftInput = HiveProject.create(leftRel, leftProjs, leftRel.getRowType() + setOpLeftInput = HiveProject.create(leftRel, leftProjs, leftRel.getRowType() .getFieldNames()); } if (rightNeedsTypeCast) { - unionRightInput = HiveProject.create(rightRel, rightProjs, rightRel.getRowType() + setOpRightInput = HiveProject.create(rightRel, rightProjs, rightRel.getRowType() .getFieldNames()); } - // 6. Construct Union Rel + // 6. 
Construct SetOp Rel Builder bldr = new ImmutableList.Builder(); - bldr.add(unionLeftInput); - bldr.add(unionRightInput); - unionRel = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build()); - - relToHiveRR.put(unionRel, unionoutRR); - relToHiveColNameCalcitePosMap.put(unionRel, - this.buildHiveToCalciteColumnMap(unionoutRR, unionRel)); - - return unionRel; + bldr.add(setOpLeftInput); + bldr.add(setOpRightInput); + SetOp setOpRel = null; + switch (opcode) { + case UNION: + setOpRel = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build()); + break; + case INTERSECT: + setOpRel = new HiveIntersect(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build(), + false); + break; + case INTERSECTALL: + setOpRel = new HiveIntersect(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build(), + true); + break; + default: + throw new SemanticException("Unsupported set operator."); + } + relToHiveRR.put(setOpRel, setOpOutRR); + relToHiveColNameCalcitePosMap.put(setOpRel, + this.buildHiveToCalciteColumnMap(setOpOutRR, setOpRel)); + return setOpRel; } private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJoinType, @@ -2045,9 +2075,9 @@ private RelNode genFilterLogicalPlan(QB qb, RelNode srcRel, Map */ private class AggInfo { private final List m_aggParams; - private final TypeInfo m_returnType; - private final String m_udfName; - private final boolean m_distinct; + private final TypeInfo m_returnType; + private final String m_udfName; + private final boolean m_distinct; private AggInfo(List aggParams, TypeInfo returnType, String udfName, boolean isDistinct) { @@ -2057,7 +2087,7 @@ private AggInfo(List aggParams, TypeInfo returnType, String udfNam m_distinct = isDistinct; } } - + private AggregateCall convertGBAgg(AggInfo agg, RelNode input, List gbChildProjLst, RexNodeConverter converter, HashMap rexNodeToPosMap, Integer childProjLstIndx) throws SemanticException { @@ -3137,17 +3167,19 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel, RelNode starSrcRel) } private RelNode genLogicalPlan(QBExpr qbexpr) throws SemanticException { - if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) { + switch (qbexpr.getOpcode()) { + case NULLOP: return genLogicalPlan(qbexpr.getQB(), false); - } - if (qbexpr.getOpcode() == QBExpr.Opcode.UNION) { + case UNION: + case INTERSECT: + case INTERSECTALL: RelNode qbexpr1Ops = genLogicalPlan(qbexpr.getQBExpr1()); RelNode qbexpr2Ops = genLogicalPlan(qbexpr.getQBExpr2()); - - return genUnionLogicalPlan(qbexpr.getAlias(), qbexpr.getQBExpr1().getAlias(), qbexpr1Ops, - qbexpr.getQBExpr2().getAlias(), qbexpr2Ops); + return genSetOpLogicalPlan(qbexpr.getOpcode(), qbexpr.getAlias(), qbexpr.getQBExpr1() + .getAlias(), qbexpr1Ops, qbexpr.getQBExpr2().getAlias(), qbexpr2Ops); + default: + return null; } - return null; } private RelNode genLogicalPlan(QB qb, boolean outerMostQB) throws SemanticException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index 7ceb005..a487fbb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -96,6 +96,7 @@ KW_CLUSTER: 'CLUSTER'; KW_DISTRIBUTE: 'DISTRIBUTE'; KW_SORT: 'SORT'; KW_UNION: 'UNION'; +KW_EXCEPT: 'EXCEPT'; KW_LOAD: 'LOAD'; KW_EXPORT: 'EXPORT'; KW_IMPORT: 'IMPORT'; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 
df596ff..5188706 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -88,6 +88,10 @@ TOK_DISTRIBUTEBY; TOK_SORTBY; TOK_UNIONALL; TOK_UNIONDISTINCT; +TOK_INTERSECTALL; +TOK_INTERSECTDISTINCT; +TOK_MINUSALL; +TOK_MINUSDISTINCT; TOK_JOIN; TOK_LEFTOUTERJOIN; TOK_RIGHTOUTERJOIN; @@ -446,6 +450,8 @@ import org.apache.hadoop.hive.conf.HiveConf; xlateMap.put("KW_DISTRIBUTE", "DISTRIBUTE"); xlateMap.put("KW_SORT", "SORT"); xlateMap.put("KW_UNION", "UNION"); + xlateMap.put("KW_INTERSECT", "INTERSECT"); + xlateMap.put("KW_EXCEPT", "EXCEPT"); xlateMap.put("KW_LOAD", "LOAD"); xlateMap.put("KW_DATA", "DATA"); xlateMap.put("KW_INPATH", "INPATH"); @@ -2277,6 +2283,12 @@ setOperator @after { popMsg(state); } : KW_UNION KW_ALL -> ^(TOK_UNIONALL) | KW_UNION KW_DISTINCT? -> ^(TOK_UNIONDISTINCT) + | KW_INTERSECT KW_ALL -> ^(TOK_INTERSECTALL) + | KW_INTERSECT KW_DISTINCT -> ^(TOK_INTERSECTDISTINCT) + | KW_EXCEPT KW_ALL -> ^(TOK_MINUSALL) + | KW_EXCEPT KW_DISTINCT -> ^(TOK_MINUSDISTINCT) + | KW_MINUS KW_ALL -> ^(TOK_MINUSALL) + | KW_MINUS KW_DISTINCT -> ^(TOK_MINUSDISTINCT) ; queryStatementExpression[boolean topLevel] @@ -2407,7 +2419,7 @@ setOpSelectStatement[CommonTree t, boolean topLevel] ) ) -> {$setOpSelectStatement.tree != null && u.tree.getType()!=HiveParser.TOK_UNIONDISTINCT}? - ^(TOK_UNIONALL {$setOpSelectStatement.tree} $b) + ^($u {$setOpSelectStatement.tree} $b) -> {$setOpSelectStatement.tree == null && u.tree.getType()==HiveParser.TOK_UNIONDISTINCT}? ^(TOK_QUERY ^(TOK_FROM @@ -2421,7 +2433,7 @@ setOpSelectStatement[CommonTree t, boolean topLevel] ^(TOK_SELECTDI ^(TOK_SELEXPR TOK_ALLCOLREF)) ) ) - -> ^(TOK_UNIONALL {$t} $b) + -> ^($u {$t} $b) )+ o=orderByClause? c=clusterByClause? 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index 9ba1865..b34a370 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -652,7 +652,7 @@ nonReserved | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_HOUR | KW_IDXPROPERTIES | KW_IGNORE | KW_INDEX | KW_INDEXES | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR | KW_KEYS | KW_KEY_TYPE | KW_LAST | KW_LIMIT | KW_OFFSET | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG - | KW_MAPJOIN | KW_MATERIALIZED | KW_METADATA | KW_MINUS | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_NULLS | KW_OFFLINE + | KW_MAPJOIN | KW_MATERIALIZED | KW_METADATA | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_NULLS | KW_OFFLINE | KW_OPTION | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PARTITIONED | KW_PARTITIONS | KW_PLUS | KW_PRETTY | KW_PRINCIPALS | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER | KW_RELOAD | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_REPLICATION | KW_RESTRICT | KW_REWRITE diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java index cccf0f6..f79e61b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java @@ -35,7 +35,7 @@ * */ public static enum Opcode { - NULLOP, UNION, INTERSECT, DIFF + NULLOP, UNION, INTERSECT, INTERSECTALL, DIFF }; private Opcode opcode; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 943d9d7..07bdf8c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -485,8 +485,7 @@ public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias, throws SemanticException { assert (ast.getToken() != null); - switch (ast.getToken().getType()) { - case HiveParser.TOK_QUERY: { + if (ast.getToken().getType() == HiveParser.TOK_QUERY) { QB qb = new QB(id, alias, true); qb.setInsideView(insideView); Phase1Ctx ctx_1 = initPhase1Ctx(); @@ -495,25 +494,35 @@ public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias, qbexpr.setOpcode(QBExpr.Opcode.NULLOP); qbexpr.setQB(qb); } - break; - case HiveParser.TOK_UNIONALL: { - qbexpr.setOpcode(QBExpr.Opcode.UNION); + // setop + else { + switch (ast.getToken().getType()) { + case HiveParser.TOK_UNIONALL: + qbexpr.setOpcode(QBExpr.Opcode.UNION); + break; + case HiveParser.TOK_INTERSECTALL: + qbexpr.setOpcode(QBExpr.Opcode.INTERSECTALL); + break; + case HiveParser.TOK_INTERSECTDISTINCT: + qbexpr.setOpcode(QBExpr.Opcode.INTERSECT); + break; + default: + throw new SemanticException("Unsupported set operator."); + } // query 1 assert (ast.getChild(0) != null); QBExpr qbexpr1 = new QBExpr(alias + SUBQUERY_TAG_1); - doPhase1QBExpr((ASTNode) ast.getChild(0), qbexpr1, id + SUBQUERY_TAG_1, - alias + SUBQUERY_TAG_1, insideView); + doPhase1QBExpr((ASTNode) ast.getChild(0), qbexpr1, id + SUBQUERY_TAG_1, alias + + SUBQUERY_TAG_1); qbexpr.setQBExpr1(qbexpr1); // query 2 assert (ast.getChild(1) != null); QBExpr qbexpr2 = new QBExpr(alias + SUBQUERY_TAG_2); - doPhase1QBExpr((ASTNode) 
ast.getChild(1), qbexpr2, id + SUBQUERY_TAG_2, - alias + SUBQUERY_TAG_2, insideView); + doPhase1QBExpr((ASTNode) ast.getChild(1), qbexpr2, id + SUBQUERY_TAG_2, alias + + SUBQUERY_TAG_2); qbexpr.setQBExpr2(qbexpr2); } - break; - } } private LinkedHashMap doPhase1GetAggregationsFromSelect( diff --git a/ql/src/test/queries/clientpositive/intersect.q b/ql/src/test/queries/clientpositive/intersect.q new file mode 100644 index 0000000..3ba6de7 --- /dev/null +++ b/ql/src/test/queries/clientpositive/intersect.q @@ -0,0 +1,40 @@ +set hive.mapred.mode=nonstrict; +set hive.cbo.enable=true; + +create table a(key int, value int); + +insert into table a values (1,2),(1,2),(1,3),(2,3); + +create table b(key int, value int); + +insert into table b values (1,2),(2,3); + +select key, value, count(1) as c from a group by key, value; + +select * from a intersect distinct select * from b; + +select * from b intersect distinct select * from a intersect distinct select * from b; + +select * from a intersect distinct select * from b union all select * from a intersect distinct select * from b; + +select * from a intersect distinct select * from b union select * from a intersect distinct select * from b; + +select * from a intersect distinct select * from b intersect distinct select * from a intersect distinct select * from b; + +select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +intersect distinct +select * from (select a.key, b.value from a join b on a.key=b.key)sub2; + +select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +intersect distinct +select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2; + +explain select * from src intersect distinct select * from src; + +select * from src intersect distinct select * from src; + +explain select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src; + +select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src; + + diff --git a/ql/src/test/results/clientpositive/intersect.q.out b/ql/src/test/results/clientpositive/intersect.q.out new file mode 100644 index 0000000..ee75780 --- /dev/null +++ b/ql/src/test/results/clientpositive/intersect.q.out @@ -0,0 +1,1163 @@ +PREHOOK: query: create table a(key int, value int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@a +POSTHOOK: query: create table a(key int, value int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@a +PREHOOK: query: insert into table a values (1,2),(1,2),(1,3),(2,3) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@a +POSTHOOK: query: insert into table a values (1,2),(1,2),(1,3),(2,3) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@a +POSTHOOK: Lineage: a.key EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: a.value EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: create table b(key int, value int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@b +POSTHOOK: query: create table b(key int, value int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@b +PREHOOK: query: insert 
into table b values (1,2),(2,3) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@b +POSTHOOK: query: insert into table b values (1,2),(2,3) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@b +POSTHOOK: Lineage: b.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: b.value EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select key, value, count(1) as c from a group by key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@a +#### A masked pattern was here #### +POSTHOOK: query: select key, value, count(1) as c from a group by key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +#### A masked pattern was here #### +1 2 2 +1 3 1 +2 3 1 +PREHOOK: query: select * from a intersect distinct select * from b +PREHOOK: type: QUERY +PREHOOK: Input: default@a +PREHOOK: Input: default@b +#### A masked pattern was here #### +POSTHOOK: query: select * from a intersect distinct select * from b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +POSTHOOK: Input: default@b +#### A masked pattern was here #### +1 2 +2 3 +PREHOOK: query: select * from b intersect distinct select * from a intersect distinct select * from b +PREHOOK: type: QUERY +PREHOOK: Input: default@a +PREHOOK: Input: default@b +#### A masked pattern was here #### +POSTHOOK: query: select * from b intersect distinct select * from a intersect distinct select * from b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +POSTHOOK: Input: default@b +#### A masked pattern was here #### +1 2 +2 3 +PREHOOK: query: select * from a intersect distinct select * from b union all select * from a intersect distinct select * from b +PREHOOK: type: QUERY +PREHOOK: Input: default@a +PREHOOK: Input: default@b +#### A masked pattern was here #### +POSTHOOK: query: select * from a intersect distinct select * from b union all select * from a intersect distinct select * from b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +POSTHOOK: Input: default@b +#### A masked pattern was here #### +1 2 +2 3 +PREHOOK: query: select * from a intersect distinct select * from b union select * from a intersect distinct select * from b +PREHOOK: type: QUERY +PREHOOK: Input: default@a +PREHOOK: Input: default@b +#### A masked pattern was here #### +POSTHOOK: query: select * from a intersect distinct select * from b union select * from a intersect distinct select * from b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +POSTHOOK: Input: default@b +#### A masked pattern was here #### +1 2 +2 3 +PREHOOK: query: select * from a intersect distinct select * from b intersect distinct select * from a intersect distinct select * from b +PREHOOK: type: QUERY +PREHOOK: Input: default@a +PREHOOK: Input: default@b +#### A masked pattern was here #### +POSTHOOK: query: select * from a intersect distinct select * from b intersect distinct select * from a intersect distinct select * from b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +POSTHOOK: Input: default@b +#### A masked pattern was here #### +1 2 +2 3 +PREHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +intersect distinct +select * from (select a.key, b.value from a join b on a.key=b.key)sub2 +PREHOOK: type: QUERY +PREHOOK: Input: default@a +PREHOOK: Input: default@b +#### A masked pattern was here #### +POSTHOOK: 
query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +intersect distinct +select * from (select a.key, b.value from a join b on a.key=b.key)sub2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +POSTHOOK: Input: default@b +#### A masked pattern was here #### +1 2 +2 3 +PREHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +intersect distinct +select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2 +PREHOOK: type: QUERY +PREHOOK: Input: default@a +PREHOOK: Input: default@b +#### A masked pattern was here #### +POSTHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +intersect distinct +select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@a +POSTHOOK: Input: default@b +#### A masked pattern was here #### +PREHOOK: query: explain select * from src intersect distinct select * from src +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from src intersect distinct select * from src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1, Stage-3 + Stage-3 is a root stage + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Union + Statistics: Num rows: 28 Data size: 5610 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(_col2) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 28 Data size: 5610 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 28 Data size: 5610 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + TableScan + Union + Statistics: Num rows: 28 Data size: 5610 Basic stats: COMPLETE Column stats: NONE + Group By Operator + 
aggregations: count(_col2) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 28 Data size: 5610 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 28 Data size: 5610 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col2 = 2) (type: boolean) + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from src intersect distinct select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from src intersect distinct select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +120 val_120 +125 val_125 +126 val_126 +128 val_128 +129 val_129 +131 
val_131 +133 val_133 +134 val_134 +136 val_136 +137 val_137 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +149 val_149 +15 val_15 +150 val_150 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +165 val_165 +166 val_166 +167 val_167 +168 val_168 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +174 val_174 +175 val_175 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +192 val_192 +193 val_193 +194 val_194 +195 val_195 +196 val_196 +197 val_197 +199 val_199 +2 val_2 +20 val_20 +200 val_200 +201 val_201 +202 val_202 +203 val_203 +205 val_205 +207 val_207 +208 val_208 +209 val_209 +213 val_213 +214 val_214 +216 val_216 +217 val_217 +218 val_218 +219 val_219 +221 val_221 +222 val_222 +223 val_223 +224 val_224 +226 val_226 +228 val_228 +229 val_229 +230 val_230 +233 val_233 +235 val_235 +237 val_237 +238 val_238 +239 val_239 +24 val_24 +241 val_241 +242 val_242 +244 val_244 +247 val_247 +248 val_248 +249 val_249 +252 val_252 +255 val_255 +256 val_256 +257 val_257 +258 val_258 +26 val_26 +260 val_260 +262 val_262 +263 val_263 +265 val_265 +266 val_266 +27 val_27 +272 val_272 +273 val_273 +274 val_274 +275 val_275 +277 val_277 +278 val_278 +28 val_28 +280 val_280 +281 val_281 +282 val_282 +283 val_283 +284 val_284 +285 val_285 +286 val_286 +287 val_287 +288 val_288 +289 val_289 +291 val_291 +292 val_292 +296 val_296 +298 val_298 +30 val_30 +302 val_302 +305 val_305 +306 val_306 +307 val_307 +308 val_308 +309 val_309 +310 val_310 +311 val_311 +315 val_315 +316 val_316 +317 val_317 +318 val_318 +321 val_321 +322 val_322 +323 val_323 +325 val_325 +327 val_327 +33 val_33 +331 val_331 +332 val_332 +333 val_333 +335 val_335 +336 val_336 +338 val_338 +339 val_339 +34 val_34 +341 val_341 +342 val_342 +344 val_344 +345 val_345 +348 val_348 +35 val_35 +351 val_351 +353 val_353 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +365 val_365 +366 val_366 +367 val_367 +368 val_368 +369 val_369 +37 val_37 +373 val_373 +374 val_374 +375 val_375 +377 val_377 +378 val_378 +379 val_379 +382 val_382 +384 val_384 +386 val_386 +389 val_389 +392 val_392 +393 val_393 +394 val_394 +395 val_395 +396 val_396 +397 val_397 +399 val_399 +4 val_4 +400 val_400 +401 val_401 +402 val_402 +403 val_403 +404 val_404 +406 val_406 +407 val_407 +409 val_409 +41 val_41 +411 val_411 +413 val_413 +414 val_414 +417 val_417 +418 val_418 +419 val_419 +42 val_42 +421 val_421 +424 val_424 +427 val_427 +429 val_429 +43 val_43 +430 val_430 +431 val_431 +432 val_432 +435 val_435 +436 val_436 +437 val_437 +438 val_438 +439 val_439 +44 val_44 +443 val_443 +444 val_444 +446 val_446 +448 val_448 +449 val_449 +452 val_452 +453 val_453 +454 val_454 +455 val_455 +457 val_457 +458 val_458 +459 val_459 +460 val_460 +462 val_462 +463 val_463 +466 val_466 +467 val_467 +468 val_468 +469 val_469 +47 val_47 +470 val_470 +472 val_472 +475 val_475 +477 val_477 +478 val_478 +479 val_479 +480 val_480 +481 val_481 +482 val_482 +483 val_483 +484 val_484 +485 val_485 +487 val_487 +489 val_489 +490 val_490 +491 val_491 +492 val_492 +493 val_493 +494 val_494 +495 val_495 +496 val_496 +497 val_497 +498 val_498 +5 val_5 +51 val_51 +53 val_53 +54 val_54 +57 val_57 +58 val_58 +64 val_64 +65 val_65 +66 val_66 +67 val_67 +69 val_69 +70 val_70 +72 val_72 +74 val_74 +76 val_76 +77 val_77 +78 val_78 +8 val_8 +80 val_80 +82 val_82 +83 val_83 +84 val_84 +85 val_85 
+86 val_86 +87 val_87 +9 val_9 +90 val_90 +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 +PREHOOK: query: explain select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1, Stage-3, Stage-4, Stage-5 + Stage-3 is a root stage + Stage-4 is a root stage + Stage-5 is a root stage + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Union + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(_col2) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + TableScan + Union + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(_col2) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + TableScan + Union + Statistics: Num rows: 56 Data size: 11220 Basic 
stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(_col2) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + TableScan + Union + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(_col2) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 56 Data size: 11220 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 28 Data size: 5610 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col2 = 4) (type: boolean) + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + 
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 
val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +120 val_120 +125 val_125 +126 val_126 +128 val_128 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +136 val_136 +137 val_137 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +149 val_149 +15 val_15 +150 val_150 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +165 val_165 +166 val_166 +167 val_167 +168 val_168 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +174 val_174 +175 val_175 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +192 val_192 +193 val_193 +194 val_194 +195 val_195 +196 val_196 +197 val_197 +199 val_199 +2 val_2 +20 val_20 +200 val_200 +201 val_201 +202 val_202 +203 val_203 +205 val_205 +207 val_207 +208 val_208 +209 val_209 +213 val_213 +214 val_214 +216 val_216 +217 val_217 +218 val_218 +219 val_219 +221 val_221 +222 val_222 +223 val_223 +224 val_224 +226 val_226 +228 val_228 +229 val_229 +230 val_230 +233 val_233 +235 val_235 +237 val_237 +238 val_238 +239 val_239 +24 val_24 +241 val_241 +242 val_242 +244 val_244 +247 val_247 +248 val_248 +249 val_249 +252 val_252 +255 val_255 +256 val_256 +257 val_257 +258 val_258 +26 val_26 +260 val_260 +262 val_262 +263 val_263 +265 val_265 +266 val_266 +27 val_27 +272 val_272 +273 val_273 +274 val_274 +275 val_275 +277 val_277 +278 val_278 +28 val_28 +280 val_280 +281 val_281 +282 val_282 +283 val_283 +284 val_284 +285 val_285 +286 val_286 +287 val_287 +288 val_288 +289 val_289 +291 val_291 +292 val_292 +296 val_296 +298 val_298 +30 val_30 +302 val_302 +305 val_305 +306 val_306 +307 val_307 +308 val_308 +309 val_309 +310 val_310 +311 val_311 +315 val_315 +316 val_316 +317 val_317 +318 val_318 +321 val_321 +322 val_322 +323 val_323 +325 val_325 +327 val_327 +33 val_33 +331 val_331 +332 val_332 +333 val_333 +335 val_335 +336 val_336 +338 val_338 +339 val_339 +34 val_34 +341 val_341 +342 val_342 +344 val_344 +345 val_345 +348 val_348 +35 val_35 +351 val_351 +353 val_353 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +365 val_365 +366 val_366 +367 val_367 +368 val_368 +369 val_369 +37 val_37 +373 val_373 +374 val_374 +375 val_375 +377 val_377 +378 val_378 +379 val_379 +382 val_382 +384 val_384 +386 val_386 +389 val_389 +392 val_392 +393 val_393 +394 val_394 +395 val_395 +396 val_396 +397 val_397 +399 val_399 +4 val_4 +400 val_400 +401 val_401 +402 val_402 +403 val_403 +404 val_404 +406 val_406 +407 val_407 +409 val_409 +41 val_41 +411 val_411 +413 val_413 +414 val_414 +417 val_417 +418 val_418 +419 val_419 +42 val_42 +421 val_421 +424 val_424 +427 val_427 +429 val_429 +43 val_43 +430 val_430 +431 val_431 +432 val_432 +435 val_435 +436 val_436 +437 val_437 +438 val_438 +439 val_439 +44 val_44 +443 val_443 +444 val_444 +446 val_446 +448 val_448 +449 val_449 +452 val_452 +453 val_453 +454 val_454 +455 val_455 +457 val_457 +458 val_458 +459 val_459 +460 val_460 +462 val_462 +463 val_463 +466 val_466 +467 val_467 +468 val_468 +469 val_469 +47 val_47 +470 val_470 +472 val_472 +475 val_475 +477 val_477 +478 val_478 +479 val_479 +480 val_480 +481 val_481 +482 val_482 +483 val_483 +484 val_484 +485 val_485 +487 val_487 +489 val_489 +490 val_490 +491 val_491 +492 val_492 +493 val_493 +494 val_494 +495 val_495 +496 val_496 +497 val_497 +498 val_498 +5 val_5 +51 val_51 +53 val_53 +54 val_54 +57 val_57 +58 val_58 +64 
val_64 +65 val_65 +66 val_66 +67 val_67 +69 val_69 +70 val_70 +72 val_72 +74 val_74 +76 val_76 +77 val_77 +78 val_78 +8 val_8 +80 val_80 +82 val_82 +83 val_83 +84 val_84 +85 val_85 +86 val_86 +87 val_87 +9 val_9 +90 val_90 +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98
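For reference, the staged plan above is the usual expansion of a merged n-way INTERSECT DISTINCT: each branch is grouped on (key, value) with a count(1), the branches are unioned, the counts are re-aggregated per (key, value), and only rows seen in all branches (here 4, hence the predicate (_col2 = 4)) survive the final filter. A minimal HiveQL sketch of a semantically equivalent query follows; the subquery alias "branches" and the HAVING formulation are illustrative only, not the exact form the optimizer emits:

    -- Hypothetical hand-written equivalent of
    --   select * from src intersect distinct select * from src
    --   intersect distinct select * from src intersect distinct select * from src
    SELECT key, value
    FROM (
      -- each branch is deduplicated by its own GROUP BY, contributing
      -- at most one row per (key, value)
      SELECT key, value, COUNT(1) AS cnt FROM src GROUP BY key, value
      UNION ALL
      SELECT key, value, COUNT(1) AS cnt FROM src GROUP BY key, value
      UNION ALL
      SELECT key, value, COUNT(1) AS cnt FROM src GROUP BY key, value
      UNION ALL
      SELECT key, value, COUNT(1) AS cnt FROM src GROUP BY key, value
    ) branches
    GROUP BY key, value
    -- keep a (key, value) pair only if it appeared in all 4 branches
    HAVING COUNT(cnt) = 4;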