diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 8bba7b6..a35d8cd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -101,8 +101,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       TableScanOperator tsop = (TableScanOperator) nd;
       AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
-      PrunedPartitionList partList =
-          aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
+      PrunedPartitionList partList = aspCtx.getParseContext().getPrunedPartitions(tsop);
       Table table = tsop.getConf().getTableMetadata();
 
       try {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index dda4f75..5cf4d53 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -498,6 +498,11 @@ public void setFetchTask(FetchTask fetchTask) {
     this.fetchTask = fetchTask;
   }
 
+  public PrunedPartitionList getPrunedPartitions(TableScanOperator ts)
+      throws SemanticException {
+    return getPrunedPartitions(ts.getConf().getAlias(), ts);
+  }
+
   public PrunedPartitionList getPrunedPartitions(String alias, TableScanOperator ts)
       throws SemanticException {
     PrunedPartitionList partsList = opToPartList.get(ts);
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index c9a5ce5..9f85ff6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10286,10 +10286,14 @@ private void enforceScanLimits(ParseContext pCtx, FetchTask fTask)
     // check whether any of them break the limit
     for (Operator topOp : topOps.values()) {
       if (topOp instanceof TableScanOperator) {
-        if (((TableScanDesc)topOp.getConf()).getIsMetadataOnly()) {
+        TableScanOperator tsOp = (TableScanOperator) topOp;
+        if (tsOp.getConf().getIsMetadataOnly()) {
+          continue;
+        }
+        PrunedPartitionList parts = pCtx.getPrunedPartitions(tsOp);
+        if (!parts.getSourceTable().isPartitioned()) {
           continue;
         }
-        PrunedPartitionList parts = pCtx.getOpToPartList().get(topOp);
         if (parts.getPartitions().size() > scanLimit) {
           throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
               + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), ""