diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index d18e1a7..91889ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -101,8 +101,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       TableScanOperator tsop = (TableScanOperator) nd;
       AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
-      PrunedPartitionList partList =
-          aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
+      PrunedPartitionList partList = aspCtx.getParseContext().getPrunedPartitions(tsop);
       Table table = tsop.getConf().getTableMetadata();
 
       try {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 6c1ab07..c43a12e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -461,6 +461,11 @@ public void setFetchTask(FetchTask fetchTask) {
     this.fetchTask = fetchTask;
   }
 
+  public PrunedPartitionList getPrunedPartitions(TableScanOperator ts)
+      throws SemanticException {
+    return getPrunedPartitions(ts.getConf().getAlias(), ts);
+  }
+
   public PrunedPartitionList getPrunedPartitions(String alias, TableScanOperator ts)
       throws SemanticException {
     PrunedPartitionList partsList = opToPartList.get(ts);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 2466d78..3ece834 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -9947,10 +9947,14 @@ private void enforceScanLimits(ParseContext pCtx, FetchTask fTask)
     // check whether any of them break the limit
     for (Operator topOp : topOps.values()) {
       if (topOp instanceof TableScanOperator) {
-        if (((TableScanDesc)topOp.getConf()).getIsMetadataOnly()) {
+        TableScanOperator tsOp = (TableScanOperator) topOp;
+        if (tsOp.getConf().getIsMetadataOnly()) {
+          continue;
+        }
+        PrunedPartitionList parts = pCtx.getPrunedPartitions(tsOp);
+        if (!parts.getSourceTable().isPartitioned()) {
           continue;
         }
-        PrunedPartitionList parts = pCtx.getOpToPartList().get(topOp);
         if (parts.getPartitions().size() > scanLimit) {
          throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
              + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), ""