Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java (revision 1617644)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java (working copy)
@@ -48,6 +48,7 @@
   Map<Integer, ColStatistics> m_hiveColStatsMap = new HashMap<Integer, ColStatistics>();
   private Integer m_numPartitions;
   PrunedPartitionList partitionList;
+  Map<String, PrunedPartitionList> partitionCache;

   protected static final Log LOG = LogFactory
       .getLog(RelOptHiveTable.class
@@ -55,7 +56,7 @@
   public RelOptHiveTable(RelOptSchema optiqSchema, String name,
       RelDataType rowType, Table hiveTblMetadata,
       List<ColumnInfo> hiveNonPartitionCols,
-      List<ColumnInfo> hivePartitionCols, HiveConf hconf) {
+      List<ColumnInfo> hivePartitionCols, HiveConf hconf, Map<String, PrunedPartitionList> partitionCache) {
     super(optiqSchema, name, rowType);
     m_hiveTblMetadata = hiveTblMetadata;
     m_hiveNonPartitionCols = ImmutableList.copyOf(hiveNonPartitionCols);
@@ -63,6 +64,7 @@
     m_hivePartitionColsMap = getColInfoMap(hivePartitionCols, m_hiveNonPartitionColsMap.size());
     m_noOfProjs = hiveNonPartitionCols.size() + hivePartitionCols.size();
     m_hiveConf = hconf;
+    this.partitionCache = partitionCache;
   }

   private static ImmutableMap<Integer, ColumnInfo> getColInfoMap(List<ColumnInfo> hiveCols,
@@ -96,25 +98,13 @@
   @Override
   public double getRowCount() {
     if (m_rowCount == -1) {
       if (m_hiveTblMetadata.isPartitioned()) {
-        if (partitionList == null) {
-          try {
-            List<Partition> parts = new ArrayList<Partition>(
-                Hive.get().getAllPartitionsOf(m_hiveTblMetadata));
-            List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
-                m_hiveTblMetadata, parts, StatsSetupConst.ROW_COUNT);
-            m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
-
-          } catch (HiveException he) {
-            throw new RuntimeException(he);
-          }
-        } else {
-          List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
-              m_hiveTblMetadata, partitionList.getNotDeniedPartns(),
-              StatsSetupConst.ROW_COUNT);
-          m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
-        }
+        computePartitionList(m_hiveConf, null);
+        List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
+            m_hiveTblMetadata, partitionList.getNotDeniedPartns(),
+            StatsSetupConst.ROW_COUNT);
+        m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
       } else {
         m_rowCount = StatsUtils.getNumRows(m_hiveTblMetadata);
       }
     }
@@ -152,16 +142,14 @@
     try {
       if (pruneNode == null || InputFinder.bits(pruneNode).length() == 0 ) {
         // there is no predicate on partitioning column, we need all partitions in this case.
-        partitionList = PartitionPruner.prune(m_hiveTblMetadata, null, conf, getName(),
-            new HashMap<String, PrunedPartitionList>());
+        partitionList = PartitionPruner.prune(m_hiveTblMetadata, null, conf, getName(), partitionCache);
         return;
       }

       // We have valid pruning expressions, only retrieve qualifying partitions
       ExprNodeDesc pruneExpr = pruneNode.accept(new ExprNodeConverter(getName(),
           getRowType(), true));
-      partitionList = PartitionPruner.prune(m_hiveTblMetadata, pruneExpr, conf, getName(),
-          new HashMap<String, PrunedPartitionList>());
+      partitionList = PartitionPruner.prune(m_hiveTblMetadata, pruneExpr, conf, getName(), partitionCache);
     } catch (HiveException he) {
       throw new RuntimeException(he);
     }
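
Note: the point of threading partitionCache through RelOptHiveTable is that repeated pruning of the same table no longer pays a metastore round trip, which is what the old per-call new HashMap<String, PrunedPartitionList>() forced. Below is a minimal, self-contained sketch of that memoization pattern; PrunedParts, prune(), metastoreCalls, and the alias-only cache key are simplified stand-ins for Hive's PrunedPartitionList and PartitionPruner.prune() (whose real cache key involves more than the alias), not the actual implementation.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PartitionCacheSketch {

  // Stand-in for PrunedPartitionList: just the partition names that survived pruning.
  static class PrunedParts {
    final List<String> partNames;
    PrunedParts(List<String> partNames) {
      this.partNames = partNames;
    }
  }

  static int metastoreCalls = 0;

  // Stand-in for PartitionPruner.prune(tbl, expr, conf, alias, cache).
  static PrunedParts prune(String alias, Map<String, PrunedParts> cache) {
    PrunedParts cached = cache.get(alias);
    if (cached != null) {
      return cached;                       // cache hit: no metastore round trip
    }
    metastoreCalls++;                      // cache miss: simulate the expensive listing
    PrunedParts fresh = new PrunedParts(Arrays.asList(alias + "/ds=1", alias + "/ds=2"));
    cache.put(alias, fresh);
    return fresh;
  }

  public static void main(String[] args) {
    Map<String, PrunedParts> shared = new HashMap<String, PrunedParts>();
    prune("t1", shared);                             // computes once
    prune("t1", shared);                             // reused via the shared cache
    prune("t1", new HashMap<String, PrunedParts>()); // old behavior: fresh map, recomputes
    System.out.println("metastore calls: " + metastoreCalls); // prints: metastore calls: 2
  }
}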
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java (revision 1617644)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java (working copy)
@@ -87,7 +87,7 @@
       .<String, Aggregation> builder()
       .put(
           "count",
-          (Aggregation) SqlStdOperatorTable.COUNT)
+          SqlStdOperatorTable.COUNT)
       .put("sum", SqlStdOperatorTable.SUM)
       .put("min", SqlStdOperatorTable.MIN)
       .put("max", SqlStdOperatorTable.MAX)
@@ -254,6 +254,7 @@
   }

   static class JoinProcessor implements NodeProcessor {
+    @Override
     @SuppressWarnings("unchecked")
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
@@ -408,6 +409,7 @@
   }

   static class FilterProcessor implements NodeProcessor {
+    @Override
     @SuppressWarnings("unchecked")
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
@@ -434,6 +436,7 @@
   }

   static class SelectProcessor implements NodeProcessor {
+    @Override
     @SuppressWarnings("unchecked")
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
@@ -454,6 +457,7 @@
      */
     List<String> oFieldNames = Lists.transform(selectOp.getConf().getOutputColumnNames(),
         new Function<String, String>() {
+          @Override
           public String apply(String hName) {
             return "_o_" + hName;
           }
@@ -467,6 +471,7 @@
   }

   static class LimitProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -492,6 +497,7 @@
   }

   static class GroupByProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -524,6 +530,7 @@
       // noinspection unchecked
       input = HiveProjectRel.create(input, CompositeList.of(Lists.transform(input.getRowType()
           .getFieldList(), new Function<RelDataTypeField, RexNode>() {
+        @Override
         public RexNode apply(RelDataTypeField input) {
           return new RexInputRef(input.getIndex(), input.getType());
         }
@@ -542,6 +549,7 @@
   }

   static class ReduceSinkProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -581,6 +589,7 @@
       // noinspection unchecked
       input = HiveProjectRel.create(input, CompositeList.of(Lists.transform(input.getRowType()
           .getFieldList(), new Function<RelDataTypeField, RexNode>() {
+        @Override
         public RexNode apply(RelDataTypeField input) {
           return new RexInputRef(input.getIndex(), input.getType());
         }
@@ -612,6 +621,7 @@
   }

   static class TableScanProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -634,7 +644,7 @@
       }
       RelDataType rowType = TypeConverter.getType(ctx.cluster, rr, neededCols);
       RelOptHiveTable optTable = new RelOptHiveTable(ctx.schema, tableScanOp.getConf().getAlias(),
-          rowType, ctx.sA.getTable(tableScanOp), null, null, null);
+          rowType, ctx.sA.getTable(tableScanOp), null, null, null, null);
       TableAccessRelBase tableRel = new HiveTableScanRel(ctx.cluster,
           ctx.cluster.traitSetOf(HiveRel.CONVENTION), optTable, rowType);
       ctx.buildColumnMap(tableScanOp, tableRel);
@@ -673,12 +683,13 @@
   }

   static class DefaultProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       @SuppressWarnings("unchecked")
       Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>) nd;
       Context ctx = (Context) procCtx;
-      RelNode node = (HiveRel) ctx.getParentNode(op, 0);
+      RelNode node = ctx.getParentNode(op, 0);
       ctx.hiveOpToRelNode.put(op, node);
       return node;
     }
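
Note: aside from the extra trailing null (the new partitionCache argument) in the RelOptHiveTable constructor call and two redundant casts removed, the RelNodeConverter changes just add missing @Override annotations to the NodeProcessor implementations and the anonymous Guava Function instances, so the compiler will flag any future signature drift. For illustration, a small runnable sketch of the same Lists.transform idiom used in SelectProcessor above (requires Guava on the classpath; PrefixDemo is a made-up class name, not part of the patch):

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Function;
import com.google.common.collect.Lists;

public class PrefixDemo {
  public static void main(String[] args) {
    List<String> hiveNames = Arrays.asList("key", "value");
    // Same idiom as SelectProcessor: prefix every Hive output column name.
    List<String> optiqNames = Lists.transform(hiveNames, new Function<String, String>() {
      @Override
      public String apply(String hName) {
        return "_o_" + hName;
      }
    });
    System.out.println(optiqNames); // [_o_key, _o_value]
  }
}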
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1617644)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy)
@@ -9568,7 +9568,7 @@
       try {
         // 1. Gen Optimized AST
-        ASTNode newAST = new OptiqBasedPlanner().getOptimizedAST();
+        ASTNode newAST = new OptiqBasedPlanner().getOptimizedAST(prunedPartitions);

         // 2. Regen OP plan from optimized AST
         init();
@@ -11785,17 +11785,18 @@
   private class OptiqBasedPlanner implements Frameworks.PlannerAction<RelNode> {
     RelOptCluster m_cluster;
     RelOptSchema m_relOptSchema;
-    SchemaPlus m_rootSchema;
     SemanticException m_semanticException;
+    Map<String, PrunedPartitionList> partitionCache;

     // TODO: Do we need to keep track of RR, ColNameToPosMap for every op or
     // just last one.
     LinkedHashMap<RelNode, RowResolver> m_relToHiveRR = new LinkedHashMap<RelNode, RowResolver>();
     LinkedHashMap<RelNode, ImmutableMap<String, Integer>> m_relToHiveColNameOptiqPosMap = new LinkedHashMap<RelNode, ImmutableMap<String, Integer>>();

-    private ASTNode getOptimizedAST() throws SemanticException {
+    private ASTNode getOptimizedAST(Map<String, PrunedPartitionList> partitionCache) throws SemanticException {
       ASTNode optiqOptimizedAST = null;
       RelNode optimizedOptiqPlan = null;
+      this.partitionCache = partitionCache;

       try {
         optimizedOptiqPlan = Frameworks.withPlanner(this);
@@ -11826,7 +11827,6 @@

       m_cluster = cluster;
       m_relOptSchema = relOptSchema;
-      m_rootSchema = rootSchema;

       try {
         optiqGenPlan = genLogicalPlan(qb);
@@ -12147,7 +12147,7 @@

       // 4. Build RelOptAbstractTable
       RelOptHiveTable optTable = new RelOptHiveTable(m_relOptSchema,
-          tableAlias, rowType, tab, nonPartitionColumns, partitionColumns, conf);
+          tableAlias, rowType, tab, nonPartitionColumns, partitionColumns, conf, partitionCache);

       // 5. Build Hive Table Scan Rel
       tableRel = new HiveTableScanRel(m_cluster,
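
Note: taken together, the three files thread one cache through the whole planning path: SemanticAnalyzer hands its prunedPartitions map to OptiqBasedPlanner.getOptimizedAST(), which stores the reference and passes it into every RelOptHiveTable it builds, so pruning results computed during CBO are visible to the rest of compilation. A compact sketch of that ownership chain under simplified assumptions (MiniPlanner, MiniTable, and CachePlumbingSketch are hypothetical stand-ins, with String standing in for PrunedPartitionList):

import java.util.HashMap;
import java.util.Map;

public class CachePlumbingSketch {

  // Stand-in for RelOptHiveTable: receives the shared cache via its constructor.
  static class MiniTable {
    private final Map<String, String> partitionCache;

    MiniTable(Map<String, String> partitionCache) {
      this.partitionCache = partitionCache;             // shared reference, never copied
    }

    String partitions(String alias) {
      // computeIfAbsent plays the role of the prune-then-cache lookup.
      return partitionCache.computeIfAbsent(alias, a -> "partitions-of-" + a);
    }
  }

  // Stand-in for OptiqBasedPlanner: stores the analyzer's map, hands it to each table.
  static class MiniPlanner {
    private Map<String, String> partitionCache;

    String getOptimizedAST(Map<String, String> partitionCache) {
      this.partitionCache = partitionCache;
      MiniTable t = new MiniTable(this.partitionCache); // every table sees the same map
      return t.partitions("t1");
    }
  }

  public static void main(String[] args) {
    Map<String, String> prunedPartitions = new HashMap<>(); // owned by the "analyzer"
    new MiniPlanner().getOptimizedAST(prunedPartitions);
    System.out.println(prunedPartitions); // {t1=partitions-of-t1}: result outlives the planner
  }
}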