diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
index 542ec96..fce5924 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
@@ -64,7 +64,6 @@ public class RelOptHiveTable extends RelOptAbstractTable {
   private final Table hiveTblMetadata;
-  private final String tblAlias;
   private final ImmutableList hiveNonPartitionCols;
   private final ImmutableList hivePartitionCols;
   private final ImmutableMap hiveNonPartitionColsMap;
@@ -84,14 +83,13 @@
       .getLog(RelOptHiveTable.class
           .getName());
 
-  public RelOptHiveTable(RelOptSchema calciteSchema, String qualifiedTblName, String tblAlias,
+  public RelOptHiveTable(RelOptSchema calciteSchema, String qualifiedTblName,
       RelDataType rowType, Table hiveTblMetadata, List hiveNonPartitionCols,
       List hivePartitionCols, List hiveVirtualCols, HiveConf hconf,
       Map partitionCache, AtomicInteger noColsMissingStats, String qbID) {
     super(calciteSchema, qualifiedTblName, rowType);
     this.hiveTblMetadata = hiveTblMetadata;
-    this.tblAlias = tblAlias;
     this.hiveNonPartitionCols = ImmutableList.copyOf(hiveNonPartitionCols);
     this.hiveNonPartitionColsMap = HiveCalciteUtil.getColInfoMap(hiveNonPartitionCols, 0);
     this.hivePartitionCols = ImmutableList.copyOf(hivePartitionCols);
@@ -135,7 +133,7 @@ public RelOptHiveTable copy(RelDataType newRowType) {
     }
 
     // 3. Build new Table
-    return new RelOptHiveTable(this.schema, this.name, this.tblAlias, newRowType,
+    return new RelOptHiveTable(this.schema, this.name, newRowType,
         this.hiveTblMetadata, newHiveNonPartitionCols, newHivePartitionCols, newHiveVirtualCols,
         this.hiveConf, this.partitionCache, this.noColsMissingStats, qbID);
   }
@@ -224,21 +222,6 @@ public Table getHiveTableMD() {
     return hiveTblMetadata;
   }
 
-  public String getTableAlias() {
-    // NOTE: Calcite considers tbls to be equal if their names are the same.
-    // Hence
-    // we need to provide Calcite the fully qualified table name
-    // (dbname.tblname)
-    // and not the user provided aliases.
-    // However in HIVE DB name can not appear in select list; in case of join
-    // where table names differ only in DB name, Hive would require user
-    // introducing explicit aliases for tbl.
-    if (tblAlias == null)
-      return hiveTblMetadata.getTableName();
-    else
-      return tblAlias;
-  }
-
   private String getColNamesForLogging(Set colLst) {
     StringBuffer sb = new StringBuffer();
     boolean firstEntry = true;
@@ -442,7 +425,7 @@ public boolean containsPartitionColumnsOnly(ImmutableBitSet cols) {
   public List getVirtualCols() {
     return this.hiveVirtualCols;
   }
-  
+
   public List getPartColumns() {
     return this.hivePartitionCols;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
index c8e9b52..6439983 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
@@ -57,6 +57,15 @@
   private final RelDataType hiveTableScanRowType;
   private final ImmutableList neededColIndxsFrmReloptHT;
+  private final String tblAlias;
+
+  public String getTableAlias() {
+    return tblAlias;
+  }
+
+  public RelDataType getHiveTableScanRowType() {
+    return hiveTableScanRowType;
+  }
 
   /**
    * Creates a HiveTableScan.
@@ -70,14 +79,15 @@
    * @param table
    *          HiveDB table
    */
-  public HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table) {
-    this(cluster, traitSet, table, table.getRowType());
+  public HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table, String alias) {
+    this(cluster, traitSet, table, alias, table.getRowType());
   }
 
-  private HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
+  private HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table, String alias,
       RelDataType newRowtype) {
     super(cluster, TraitsUtil.getDefaultTraitSet(cluster), table);
     assert getConvention() == HiveRelNode.CONVENTION;
+    this.tblAlias = alias;
     this.hiveTableScanRowType = newRowtype;
     this.neededColIndxsFrmReloptHT = buildNeededColIndxsFrmReloptHT(table.getRowType(), newRowtype);
   }
@@ -91,12 +101,12 @@ public RelNode copy(RelTraitSet traitSet, List inputs) {
   /**
    * Copy TableScan operator with a new Row Schema. The new Row Schema can only
    * be a subset of this TS schema.
-   * 
+   *
    * @param newRowtype
    * @return
   */
   public HiveTableScan copy(RelDataType newRowtype) {
-    return new HiveTableScan(getCluster(), getTraitSet(), ((RelOptHiveTable) table),
+    return new HiveTableScan(getCluster(), getTraitSet(), ((RelOptHiveTable) table), this.tblAlias,
         newRowtype);
   }
 
@@ -119,7 +129,7 @@ public void implement(Implementor implementor) {
   public double getRows() {
     return ((RelOptHiveTable) table).getRowCount();
   }
-  
+
   public List getColStat(List projIndxLst) {
     return ((RelOptHiveTable) table).getColStat(projIndxLst);
   }
@@ -163,7 +173,7 @@ public RelNode project(ImmutableBitSet fieldsUsed, Set extraFi
   public List getNeededColIndxsFrmReloptHT() {
     return neededColIndxsFrmReloptHT;
   }
-  
+
   public RelDataType getPrunedRowType() {
     return hiveTableScanRowType;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index 8c8e288..1f5d919 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
@@ -68,7 +69,7 @@ static ASTNode table(TableScan scan) {
     // However in HIVE DB name can not appear in select list; in case of join
     // where table names differ only in DB name, Hive would require user
     // introducing explicit aliases for tbl.
-    b.add(HiveParser.Identifier, hTbl.getTableAlias());
+    b.add(HiveParser.Identifier, ((HiveTableScan)scan).getTableAlias());
 
     return b.node();
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index 2b8dd08..0ada068 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter.HiveToken;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
@@ -70,8 +71,8 @@ public class ASTConverter {
   private static final Log LOG = LogFactory.getLog(ASTConverter.class);
 
-  private RelNode root;
-  private HiveAST hiveAST;
+  private final RelNode root;
+  private final HiveAST hiveAST;
   private RelNode from;
   private Filter where;
   private Aggregate groupBy;
@@ -214,7 +215,7 @@ else if (aggregateType == Group.CUBE) {
 
   private void convertLimitToASTNode(HiveSort limit) {
     if (limit != null) {
-      HiveSort hiveLimit = (HiveSort) limit;
+      HiveSort hiveLimit = limit;
       RexNode limitExpr = hiveLimit.getFetchExpr();
       if (limitExpr != null) {
         Object val = ((RexLiteral) limitExpr).getValue2();
@@ -225,12 +226,12 @@ private void convertLimitToASTNode(HiveSort limit) {
 
   private void convertOBToASTNode(HiveSort order) {
     if (order != null) {
-      HiveSort hiveSort = (HiveSort) order;
+      HiveSort hiveSort = order;
       if (!hiveSort.getCollation().getFieldCollations().isEmpty()) {
         // 1 Add order by token
         ASTNode orderAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY");
 
-        schema = new Schema((HiveSort) hiveSort);
+        schema = new Schema(hiveSort);
         Map obRefToCallMap = hiveSort.getInputRefToCallMap();
         RexNode obExpr;
         ASTNode astCol;
@@ -371,7 +372,7 @@ public void visit(RelNode node, int ordinal, RelNode parent) {
   static class RexVisitor extends RexVisitorImpl {
 
     private final Schema schema;
-    private boolean useTypeQualInLiteral;
+    private final boolean useTypeQualInLiteral;
 
     protected RexVisitor(Schema schema) {
       this(schema, false);
@@ -568,7 +569,7 @@ public QueryBlockInfo(Schema schema, ASTNode ast) {
     private static final long serialVersionUID = 1L;
 
     Schema(TableScan scan) {
-      String tabName = ((RelOptHiveTable) scan.getTable()).getTableAlias();
+      String tabName = ((HiveTableScan) scan).getTableAlias();
       for (RelDataTypeField field : scan.getRowType().getFieldList()) {
         add(new ColumnInfo(tabName, field.getName()));
       }
@@ -631,7 +632,7 @@ public QueryBlockInfo(Schema schema, ASTNode ast) {
      * 1. Project will always be child of Sort.
      * 2. In Calcite every projection in Project is uniquely named
      * (unambigous) without using table qualifier (table name).
-     * 
+     *
      * @param order
      *          Hive Sort Node
      * @return Schema
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 576b18c..9cc8ed1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -179,7 +179,7 @@ OpAttr dispatch(RelNode rn) throws SemanticException {
 
   /**
    * TODO: 1. PPD needs to get pushed in to TS
-   * 
+   *
    * @param scanRel
    * @return
   */
@@ -207,7 +207,7 @@ OpAttr visit(HiveTableScan scanRel) {
     Map posToNonPartColInfo = ht.getNonPartColInfoMap();
     List neededColIndxsFrmReloptHT = scanRel.getNeededColIndxsFrmReloptHT();
     List scanColNames = scanRel.getRowType().getFieldNames();
-    String tableAlias = ht.getTableAlias();
+    String tableAlias = scanRel.getTableAlias();
 
     String colName;
     ColumnInfo colInfo;
@@ -269,7 +269,7 @@ OpAttr visit(HiveProject projectRel) throws SemanticException {
       ExprNodeConverter converter = new ExprNodeConverter(inputOpAf.tabAlias, projectRel
           .getRowType().getFieldNames().get(pos), projectRel.getInput().getRowType(),
           projectRel.getRowType(), false, projectRel.getCluster().getTypeFactory());
-      exprCols.add((ExprNodeDesc) projectRel.getChildExps().get(pos).accept(converter));
+      exprCols.add(projectRel.getChildExps().get(pos).accept(converter));
       if (converter.getWindowFunctionSpec() != null) {
         windowingSpec.addWindowFunction(converter.getWindowFunctionSpec());
       }
@@ -403,7 +403,7 @@ OpAttr visit(HiveSort sortRel) throws SemanticException {
       LimitDesc limitDesc = new LimitDesc(limit);
       // TODO: Set 'last limit' global property
       ArrayList cinfoLst = createColInfos(inputOp);
-      resultOp = (LimitOperator) OperatorFactory.getAndMakeChild(limitDesc,
+      resultOp = OperatorFactory.getAndMakeChild(limitDesc,
           new RowSchema(cinfoLst), resultOp);
 
       if (LOG.isDebugEnabled()) {
@@ -585,7 +585,7 @@ private static SelectOperator genReduceSinkAndBacktrackSelect(Operator input,
         acidOperation, strictMode);
 
     // 2. Generate backtrack Select operator
-    Map descriptors = buildBacktrackFromReduceSink((ReduceSinkOperator) rsOp,
+    Map descriptors = buildBacktrackFromReduceSink(rsOp,
         input);
     SelectDesc selectDesc = new SelectDesc(new ArrayList(descriptors.values()),
         new ArrayList(descriptors.keySet()));
@@ -848,7 +848,7 @@ private static JoinType extractJoinType(HiveJoin join) {
   }
 
   private static ExprNodeDesc convertToExprNode(RexNode rn, RelNode inputRel, String tabAlias) {
-    return (ExprNodeDesc) rn.accept(new ExprNodeConverter(tabAlias, inputRel.getRowType(), false,
+    return rn.accept(new ExprNodeConverter(tabAlias, inputRel.getRowType(), false,
         inputRel.getCluster().getTypeFactory()));
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
index a6d564b..cba37bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
@@ -103,7 +103,7 @@ private static String getTblAlias(RelNode rel) {
       return null;
     }
     if (rel instanceof HiveTableScan) {
-      return ((RelOptHiveTable)((HiveTableScan)rel).getTable()).getTableAlias();
+      return ((HiveTableScan)rel).getTableAlias();
     }
     if (rel instanceof Project) {
       return null;
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 983350a..07af0fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -611,7 +611,7 @@ Operator getOptimizedHiveOPDag() throws SemanticException {
 
     RelNode modifiedOptimizedOptiqPlan = introduceProjectIfNeeded(optimizedOptiqPlan);
 
-    Operator hiveRoot = new HiveOpConverter(this, conf, unparseTranslator, topOps, 
+    Operator hiveRoot = new HiveOpConverter(this, conf, unparseTranslator, topOps,
        conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict")).convert(modifiedOptimizedOptiqPlan);
     RowResolver hiveRootRR = genRowResolver(hiveRoot, getQB());
     opParseCtx.put(hiveRoot, new OpParseContext(hiveRootRR));
@@ -1332,11 +1332,11 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc
         fullyQualifiedTabName = tabMetaData.getTableName();
       }
       RelOptHiveTable optTable = new RelOptHiveTable(relOptSchema, fullyQualifiedTabName,
-          tableAlias, rowType, tabMetaData, nonPartitionColumns, partitionColumns, virtualCols, conf,
+          rowType, tabMetaData, nonPartitionColumns, partitionColumns, virtualCols, conf,
          partitionCache, noColsMissingStats, getAliasId(tableAlias, qb));
 
       // 5. Build Hive Table Scan Rel
-      tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable);
+      tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, null == tableAlias ? tabMetaData.getTableName() : tableAlias);
 
       // 6. Add Schema(RR) to RelNode-Schema map
       ImmutableMap hiveToCalciteColMap = buildHiveToCalciteColumnMap(rr,
@@ -1922,7 +1922,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
       } else if (qbp.getDestGroupingSets().contains(detsClauseName)) {
         groupingSets = getGroupingSets(grpByAstExprs, qbp, detsClauseName);
       }
-      
+
       final int limit = groupingColsSize * 2;
       while (groupingColsSize < limit) {
         String field = getColumnInternalName(groupingColsSize);