diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 388a604..ede3309 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -557,6 +557,7 @@
     HIVELIMITOPTENABLE("hive.limit.optimize.enable", false),
     HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000),
     HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f),
+    HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1),
     HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000),
     HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75),
diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 1ddee49..2473f15 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -388,6 +388,8 @@
   DYNAMIC_PARTITIONS_TOO_MANY_PER_NODE_ERROR(20004, "Fatal error occurred when node "
       + "tried to create too many dynamic partitions. The maximum number of dynamic partitions "
       + "is controlled by hive.exec.max.dynamic.partitions and hive.exec.max.dynamic.partitions.pernode. "),
+  PARTITION_SCAN_LIMIT_EXCEEDED(20005, "Number of partitions scanned (={0}) on table {1} exceeds limit"
+      + " (={2}). This is controlled by hive.limit.query.max.table.partition.", true),

   //========================== 30000 range starts here ========================//
   STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " +
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 89d2a9c..67aefae 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.FunctionInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
@@ -9260,6 +9261,11 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
     optm.initialize(conf);
     pCtx = optm.optimize();

+    if (!ctx.getExplain()) {
+      // if a limit is configured, make sure we're not exceeding the partition scan limit
+      enforceScanLimits(pCtx, conf);
+    }
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
     }
@@ -9285,6 +9291,39 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
     return;
   }

+  private void enforceScanLimits(ParseContext pCtx, HiveConf conf) throws SemanticException {
+    int scanLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITTABLESCANPARTITION);
+
+    if (scanLimit > -1) {
+      // a scan limit on the number of partitions has been set by the user
+      if (pCtx.getFetchTask() != null) {
+        // a fetch task at this point means we're not going to launch a job on the cluster
+        FetchTask fTask = pCtx.getFetchTask();
+        // check the partition count for a simple fetch query
+        if (!fTask.getWork().isNotPartitioned() && fTask.getWork().getLimit() == -1
+            && scanLimit < fTask.getWork().getPartDir().size()) {
+          throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED,
+              "" + fTask.getWork().getPartDir().size(),
""+fTask.getWork().getTblDesc().getTableName(), + ""+scanLimit); + } + } else { + // At this point we've run the partition pruner for all top ops. Let's check whether any of them break the limit + for (Operator topOp: topOps.values()) { + if (topOp instanceof TableScanOperator) { + PrunedPartitionList parts = pCtx.getOpToPartList().get((TableScanOperator)topOp); + if (parts.getPartitions().size() > scanLimit) { + throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, + ""+parts.getPartitions().size(), + ""+parts.getSourceTable().getTableName(), + ""+scanLimit); + } + } + } + } + } + } + @Override public List getResultSchema() { return resultSchema; diff --git ql/src/test/queries/clientnegative/limit_partition.q ql/src/test/queries/clientnegative/limit_partition.q new file mode 100644 index 0000000..d593945 --- /dev/null +++ ql/src/test/queries/clientnegative/limit_partition.q @@ -0,0 +1,7 @@ +set hive.limit.query.max.table.partition=1; + +explain select * from srcpart limit 1; +select * from srcpart limit 1; + +explain select * from srcpart; +select * from srcpart; diff --git ql/src/test/queries/clientnegative/limit_partition_2.q ql/src/test/queries/clientnegative/limit_partition_2.q new file mode 100644 index 0000000..e91adab --- /dev/null +++ ql/src/test/queries/clientnegative/limit_partition_2.q @@ -0,0 +1,7 @@ +set hive.limit.query.max.table.partition=1; + +explain select ds from srcpart where hr=11 and ds='2008-04-08'; +select ds from srcpart where hr=11 and ds='2008-04-08'; + +explain select distinct hr from srcpart; +select distinct hr from srcpart; diff --git ql/src/test/queries/clientnegative/limit_partition_3.q ql/src/test/queries/clientnegative/limit_partition_3.q new file mode 100644 index 0000000..0afd4a9 --- /dev/null +++ ql/src/test/queries/clientnegative/limit_partition_3.q @@ -0,0 +1,18 @@ +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.stats.autogather=true; +set hive.compute.query.using.stats=true; + +create table part (c int) partitioned by (d string); +insert into table part partition (d) +select hr,ds from srcpart; + +set hive.limit.query.max.table.partition=1; + +explain select count(*) from part; +select count(*) from part; + +set hive.compute.query.using.stats=false; + +explain select count(*) from part; +select count(*) from part; diff --git ql/src/test/results/clientnegative/limit_partition.q.out ql/src/test/results/clientnegative/limit_partition.q.out new file mode 100644 index 0000000..2dba640 --- /dev/null +++ ql/src/test/results/clientnegative/limit_partition.q.out @@ -0,0 +1,63 @@ +PREHOOK: query: explain select * from srcpart limit 1 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from srcpart limit 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + TableScan + alias: srcpart + Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: select * from srcpart limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: 
Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select * from srcpart limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +238 val_238 2008-04-08 11 +PREHOOK: query: explain select * from srcpart +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from srcpart +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: srcpart + Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 116 Data size: 23248 Basic stats: COMPLETE Column stats: NONE + ListSink + +FAILED: SemanticException Number of partitions scanned (=4) on table default.srcpart exceeds limit (=1). This is controlled by hive.limit.query.max.table.partition. diff --git ql/src/test/results/clientnegative/limit_partition_2.q.out ql/src/test/results/clientnegative/limit_partition_2.q.out new file mode 100644 index 0000000..26f327d --- /dev/null +++ ql/src/test/results/clientnegative/limit_partition_2.q.out @@ -0,0 +1,593 @@ +PREHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 
+2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 
+2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +2008-04-08 +PREHOOK: query: explain select distinct hr from srcpart +PREHOOK: type: QUERY +POSTHOOK: query: explain select distinct hr from srcpart +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + expressions: hr (type: string) + outputColumnNames: hr + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Group By Operator + keys: hr (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +FAILED: SemanticException Number of partitions scanned (=4) on table srcpart exceeds limit (=1). This is controlled by hive.limit.query.max.table.partition. diff --git ql/src/test/results/clientnegative/limit_partition_3.q.out ql/src/test/results/clientnegative/limit_partition_3.q.out new file mode 100644 index 0000000..7877b95 --- /dev/null +++ ql/src/test/results/clientnegative/limit_partition_3.q.out @@ -0,0 +1,102 @@ +PREHOOK: query: create table part (c int) partitioned by (d string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table part (c int) partitioned by (d string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@part +PREHOOK: query: insert into table part partition (d) +select hr,ds from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@part +POSTHOOK: query: insert into table part partition (d) +select hr,ds from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@part@d=2008-04-08 +POSTHOOK: Output: default@part@d=2008-04-09 +POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +PREHOOK: query: explain select count(*) from part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from part +POSTHOOK: type: QUERY +POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + +PREHOOK: query: select count(*) from part +PREHOOK: type: QUERY +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from part +POSTHOOK: type: QUERY +#### A masked pattern was here #### +POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +2000 +PREHOOK: query: explain select count(*) from part +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from part +POSTHOOK: type: QUERY +POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: part + 
Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + +FAILED: SemanticException Number of partitions scanned (=2) on table part exceeds limit (=1). This is controlled by hive.limit.query.max.table.partition.
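
For reference, a minimal usage sketch of the new knob against the same srcpart fixture the tests above use (4 partitions: ds in {2008-04-08, 2008-04-09} x hr in {11, 12}). The outcomes mirror limit_partition.q and limit_partition_2.q; the session itself is illustrative and not part of the patch:

-- srcpart has 4 partitions; allow at most 2 per table scan
set hive.limit.query.max.table.partition=2;

-- passes: the pruner keeps only ds='2008-04-08', i.e. 2 partitions
select * from srcpart where ds='2008-04-08';

-- passes: an explicit LIMIT makes this a bounded fetch, so the check is skipped
select * from srcpart limit 1;

-- FAILED: SemanticException ... (error 20005): all 4 partitions would be scanned
select * from srcpart;

-- explain is exempt (see the ctx.getExplain() guard above)
explain select * from srcpart;

-- the default, -1, disables the check entirely
set hive.limit.query.max.table.partition=-1;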