From cabe5b89938ced5b7137a588b3719cddd3072ba1 Mon Sep 17 00:00:00 2001 From: Ashutosh Chauhan Date: Fri, 16 Oct 2015 18:41:48 -0700 Subject: [PATCH] HIVE-10807 : Invalidate basic stats for insert queries if autogather=false (Ashutosh Chauhan via Gopal V) --- .../org/apache/hadoop/hive/ql/QueryProperties.java | 10 -- .../org/apache/hadoop/hive/ql/metadata/Hive.java | 12 ++ .../hadoop/hive/ql/optimizer/GenMapRedUtils.java | 3 +- .../hadoop/hive/ql/optimizer/StatsOptimizer.java | 38 ++++-- .../apache/hadoop/hive/ql/parse/QBParseInfo.java | 9 -- .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 6 - ql/src/test/queries/clientpositive/insert_into1.q | 10 +- ql/src/test/queries/clientpositive/insert_into2.q | 8 ++ .../results/clientpositive/bucket_map_join_1.q.out | 4 - .../results/clientpositive/bucket_map_join_2.q.out | 4 - .../encryption_insert_partition_dynamic.q.out | 4 - .../encryption_join_unencrypted_tbl.q.out | 4 - .../test/results/clientpositive/insert_into1.q.out | 151 +++++++++++++++++++++ .../test/results/clientpositive/insert_into2.q.out | 69 ++++++++++ .../clientpositive/spark/bucket_map_join_1.q.out | 8 -- .../clientpositive/spark/bucket_map_join_2.q.out | 8 -- .../clientpositive/spark/insert_into1.q.out | 116 ++++++++++++++++ .../clientpositive/spark/insert_into2.q.out | 75 ++++++++++ .../test/results/clientpositive/spark/stats3.q.out | 2 - ql/src/test/results/clientpositive/stats3.q.out | 2 - .../results/clientpositive/tez/insert_into1.q.out | 120 ++++++++++++++++ 21 files changed, 586 insertions(+), 77 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java index e8f7fba..3bc9432 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java @@ -39,7 +39,6 @@ boolean noScanAnalyzeCommand; boolean analyzeRewrite; boolean ctas; - boolean insertToTable; int outerQueryLimit; boolean hasJoin = false; @@ -115,14 +114,6 @@ public void setCTAS(boolean ctas) { this.ctas = ctas; } - public boolean isInsertToTable() { - return insertToTable; - } - - public void setInsertToTable(boolean insertToTable) { - this.insertToTable = insertToTable; - } - public int getOuterQueryLimit() { return outerQueryLimit; } @@ -276,7 +267,6 @@ public void clear() { noScanAnalyzeCommand = false; analyzeRewrite = false; ctas = false; - insertToTable = false; outerQueryLimit = -1; hasJoin = false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 4e3be0d..5aa1742 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1467,6 +1467,7 @@ public Partition loadPartition(Path loadPath, Table tbl, boolean forceCreate = (!holdDDLTime) ? true : false; newTPart = getPartition(tbl, partSpec, forceCreate, newPartPath.toString(), inheritTableSpecs, newFiles); + // recreate the partition if it existed before if (!holdDDLTime) { if (isSkewedStoreAsSubdir) { @@ -1478,12 +1479,19 @@ public Partition loadPartition(Path loadPath, Table tbl, /* Add list bucketing location mappings. 
*/ skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps); newCreatedTpart.getSd().setSkewedInfo(skewedInfo); + if(!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + newTPart.getParameters().put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false"); + } alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newCreatedTpart)); newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs, newFiles); return new Partition(tbl, newCreatedTpart); } } + if(!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + newTPart.getParameters().put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false"); + alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newTPart.getTPartition())); + } } catch (IOException e) { LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); @@ -1720,6 +1728,10 @@ public void loadTable(Path loadPath, String tableName, boolean replace, } catch (IOException e) { throw new HiveException("addFiles: filesystem error in check phase", e); } + } + if(!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + tbl.getParameters().put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false"); + } else { tbl.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, "true"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 109b938..c22c35f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1500,8 +1500,7 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, * @return */ public static boolean isInsertInto(ParseContext parseCtx, FileSinkOperator fsOp) { - return fsOp.getConf().getTableInfo().getTableName() != null && - parseCtx.getQueryProperties().isInsertToTable(); + return fsOp.getConf().getTableInfo().getTableName() != null; } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index 5a21e6b..aa204c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -144,17 +144,23 @@ public MetaDataProcessor (ParseContext pctx) { } enum LongSubType { - BIGINT { Object cast(long longValue) { return longValue; } }, - INT { Object cast(long longValue) { return (int)longValue; } }, - SMALLINT { Object cast(long longValue) { return (short)longValue; } }, - TINYINT { Object cast(long longValue) { return (byte)longValue; } }; + BIGINT { @Override + Object cast(long longValue) { return longValue; } }, + INT { @Override + Object cast(long longValue) { return (int)longValue; } }, + SMALLINT { @Override + Object cast(long longValue) { return (short)longValue; } }, + TINYINT { @Override + Object cast(long longValue) { return (byte)longValue; } }; abstract Object cast(long longValue); } enum DoubleSubType { - DOUBLE { Object cast(double doubleValue) { return doubleValue; } }, - FLOAT { Object cast(double doubleValue) { return (float) doubleValue; } }; + DOUBLE { @Override + Object cast(double doubleValue) { return doubleValue; } }, + FLOAT { @Override + Object cast(double doubleValue) { return (float) doubleValue; } }; abstract Object cast(double doubleValue); } @@ -221,7 +227,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // 
Since we have done an exact match on TS-SEL-GBY-RS-GBY-(SEL)-FS // we need not to do any instanceof checks for following. GroupByOperator pgbyOp = (GroupByOperator)stack.get(2); - if (pgbyOp.getConf().getOutputColumnNames().size() != + if (pgbyOp.getConf().getOutputColumnNames().size() != pgbyOp.getConf().getAggregators().size()) { return null; } @@ -260,7 +266,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, FileSinkOperator fsOp = (FileSinkOperator)last; if (fsOp.getNumChild() > 0) { // looks like a subq plan. - return null; // todo we can collapse this part of tree into single TS + return null; // todo we can collapse this part of tree into single TS } Table tbl = tsOp.getConf().getTableMetadata(); @@ -296,7 +302,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } switch (category) { - case LONG: + case LONG: oneRow.add(Long.valueOf(constant) * rowCnt); break; case DOUBLE: @@ -436,7 +442,7 @@ else if (udaf instanceof GenericUDAFCount) { switch (type) { case Integeral: { LongSubType subType = LongSubType.valueOf(name); - + Long maxVal = null; Collection> result = verifyAndGetPartStats(hive, tbl, colName, parts); @@ -462,7 +468,7 @@ else if (udaf instanceof GenericUDAFCount) { } case Double: { DoubleSubType subType = DoubleSubType.valueOf(name); - + Double maxVal = null; Collection> result = verifyAndGetPartStats(hive, tbl, colName, parts); @@ -537,7 +543,7 @@ else if (udaf instanceof GenericUDAFCount) { switch(type) { case Integeral: { LongSubType subType = LongSubType.valueOf(name); - + Long minVal = null; Collection> result = verifyAndGetPartStats(hive, tbl, colName, parts); @@ -563,7 +569,7 @@ else if (udaf instanceof GenericUDAFCount) { } case Double: { DoubleSubType subType = DoubleSubType.valueOf(name); - + Double minVal = null; Collection> result = verifyAndGetPartStats(hive, tbl, colName, parts); @@ -680,6 +686,9 @@ private Long getRowCnt( if (tbl.isPartitioned()) { for (Partition part : pctx.getPrunedPartitions( tsOp.getConf().getAlias(), tsOp).getPartitions()) { + if (!StatsSetupConst.areStatsUptoDate(part.getParameters())) { + return null; + } long partRowCnt = Long.parseLong(part.getParameters().get(StatsSetupConst.ROW_COUNT)); if (partRowCnt < 1) { Log.debug("Partition doesn't have upto date stats " + part.getSpec()); @@ -688,6 +697,9 @@ private Long getRowCnt( rowCnt += partRowCnt; } } else { // unpartitioned table + if (!StatsSetupConst.areStatsUptoDate(tbl.getParameters())) { + return null; + } rowCnt = Long.parseLong(tbl.getProperty(StatsSetupConst.ROW_COUNT)); if (rowCnt < 1) { // if rowCnt < 1 than its either empty table or table on which stats are not diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java index 14a7e9c..9072d7f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java @@ -65,7 +65,6 @@ private final HashSet insertIntoTables; private boolean isAnalyzeCommand; // used for the analyze command (statistics) - private boolean isInsertToTable; // used for insert overwrite command (statistics) private boolean isNoScanAnalyzeCommand; // used for the analyze command (statistics) (noscan) private boolean isPartialScanAnalyzeCommand; // used for the analyze command (statistics) // (partialscan) @@ -550,14 +549,6 @@ public boolean isAnalyzeCommand() { return isAnalyzeCommand; } - public void setIsInsertToTable(boolean isInsertToTable) { - 
this.isInsertToTable = isInsertToTable; - } - - public boolean isInsertToTable() { - return isInsertToTable; - } - public void addTableSpec(String tName, TableSpec tSpec) { tableSpecs.put(tName, tSpec); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 3262887..f9cba84 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -1735,8 +1735,6 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException qb.getMetaData().setDestForAlias(name, ts.partHandle); } if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - // Set that variable to automatically collect stats during the MapReduce job - qb.getParseInfo().setIsInsertToTable(true); // Add the table spec for the destination table. qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts); } @@ -1773,8 +1771,6 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException } if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { TableSpec ts = new TableSpec(db, conf, this.ast); - // Set that variable to automatically collect stats during the MapReduce job - qb.getParseInfo().setIsInsertToTable(true); // Add the table spec for the destination table. qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts); } @@ -6360,7 +6356,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // verify that our destination is empty before proceeding if (dest_tab.isImmutable() && qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),dest_tab.getTableName())){ - qb.getParseInfo().isInsertToTable(); try { FileSystem fs = partPath.getFileSystem(conf); if (! 
MetaStoreUtils.isDirEmpty(fs,partPath)){ @@ -12254,7 +12249,6 @@ private void copyInfoToQueryProperties(QueryProperties queryProperties) { queryProperties.setNoScanAnalyzeCommand(qb.getParseInfo().isNoScanAnalyzeCommand()); queryProperties.setAnalyzeRewrite(qb.isAnalyzeRewrite()); queryProperties.setCTAS(qb.getTableDesc() != null); - queryProperties.setInsertToTable(qb.getParseInfo().isInsertToTable()); queryProperties.setHasOuterOrderBy(!qb.getParseInfo().getIsSubQ() && !qb.getParseInfo().getDestToOrderBy().isEmpty()); queryProperties.setOuterQueryLimit(qb.getParseInfo().getOuterQueryLimit()); diff --git a/ql/src/test/queries/clientpositive/insert_into1.q b/ql/src/test/queries/clientpositive/insert_into1.q index af82e3a..f00d06e 100644 --- a/ql/src/test/queries/clientpositive/insert_into1.q +++ b/ql/src/test/queries/clientpositive/insert_into1.q @@ -43,6 +43,14 @@ insert into insert_into1 select 2, 'b'; select * from insert_into1; -DROP TABLE insert_into1; +set hive.stats.autogather=false; +explain +insert into table insert_into1 values(1, 'abc'); +insert into table insert_into1 values(1, 'abc'); +explain +SELECT COUNT(*) FROM insert_into1; +select count(*) from insert_into1; +DROP TABLE insert_into1; +set hive.stats.autogather=true; set hive.compute.query.using.stats=false; diff --git a/ql/src/test/queries/clientpositive/insert_into2.q b/ql/src/test/queries/clientpositive/insert_into2.q index 7183c75..d127c04 100644 --- a/ql/src/test/queries/clientpositive/insert_into2.q +++ b/ql/src/test/queries/clientpositive/insert_into2.q @@ -41,7 +41,15 @@ explain SELECT COUNT(*) FROM insert_into2 WHERE ds='2'; SELECT COUNT(*) FROM insert_into2 WHERE ds='2'; +set hive.stats.autogather=false; + +insert into table insert_into2 partition (ds='2') values(1, 'abc'); +explain +SELECT COUNT(*) FROM insert_into2 where ds='2'; +select count(*) from insert_into2 where ds='2'; + DROP TABLE insert_into2; +set hive.stats.autogather=true; set hive.compute.query.using.stats=false; diff --git a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out index ce0df01..c7a8a20 100644 --- a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out +++ b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out @@ -182,8 +182,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -204,8 +202,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/bucket_map_join_2.q.out b/ql/src/test/results/clientpositive/bucket_map_join_2.q.out index 791e606..3c3793f 100644 --- a/ql/src/test/results/clientpositive/bucket_map_join_2.q.out +++ b/ql/src/test/results/clientpositive/bucket_map_join_2.q.out @@ -182,8 +182,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -204,8 +202,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 
serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out index 3ed1fdb..13fae42 100644 --- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out +++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out @@ -309,8 +309,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.src numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -329,8 +327,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.src numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out index 7997fcb..5dd927d 100644 --- a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out +++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out @@ -673,8 +673,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.src numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -693,8 +691,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.src numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/insert_into1.q.out b/ql/src/test/results/clientpositive/insert_into1.q.out index cf627a6..7f3112c 100644 --- a/ql/src/test/results/clientpositive/insert_into1.q.out +++ b/ql/src/test/results/clientpositive/insert_into1.q.out @@ -539,6 +539,157 @@ POSTHOOK: Input: default@insert_into1 #### A masked pattern was here #### 1 a 2 b +PREHOOK: query: explain +insert into table insert_into1 values(1, 'abc') +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert into table insert_into1 values(1, 'abc') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4 + Stage-3 + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5 + Stage-2 + Stage-4 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: values__tmp__table__1 + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-6 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert into table insert_into1 values(1, 'abc') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: insert into table insert_into1 values(1, 'abc') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: insert_into1 + Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: 
default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +3 PREHOOK: query: DROP TABLE insert_into1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert_into1 diff --git a/ql/src/test/results/clientpositive/insert_into2.q.out b/ql/src/test/results/clientpositive/insert_into2.q.out index cc7e135..737e576 100644 --- a/ql/src/test/results/clientpositive/insert_into2.q.out +++ b/ql/src/test/results/clientpositive/insert_into2.q.out @@ -394,6 +394,75 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@insert_into2 #### A masked pattern was here #### 50 +PREHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 where ds='2' +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 where ds='2' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: insert_into2 + Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into2 where ds='2' +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from insert_into2 where ds='2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +51 PREHOOK: query: DROP TABLE insert_into2 PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert_into2 diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out 
b/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out index d3f433d..c1b13aa 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out @@ -149,8 +149,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table2 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -171,8 +169,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table2 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -242,8 +238,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -264,8 +258,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out index 3d850db..580e098 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out @@ -149,8 +149,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table2 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -171,8 +169,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table2 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -242,8 +238,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -264,8 +258,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.table1 numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct table1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/spark/insert_into1.q.out b/ql/src/test/results/clientpositive/spark/insert_into1.q.out index 38134a1..00e71ba 100644 --- a/ql/src/test/results/clientpositive/spark/insert_into1.q.out +++ b/ql/src/test/results/clientpositive/spark/insert_into1.q.out @@ -475,6 +475,122 @@ POSTHOOK: Input: default@insert_into1 #### A masked pattern was here #### 1 a 2 b +PREHOOK: query: explain +insert into table insert_into1 values(1, 'abc') +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert into table insert_into1 values(1, 'abc') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark +#### A masked pattern was here #### + 
Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: values__tmp__table__1 + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + +PREHOOK: query: insert into table insert_into1 values(1, 'abc') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: insert into table insert_into1 values(1, 'abc') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into1 + Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A 
masked pattern was here #### +3 PREHOOK: query: DROP TABLE insert_into1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert_into1 diff --git a/ql/src/test/results/clientpositive/spark/insert_into2.q.out b/ql/src/test/results/clientpositive/spark/insert_into2.q.out index 578fae2..26bf1e6 100644 --- a/ql/src/test/results/clientpositive/spark/insert_into2.q.out +++ b/ql/src/test/results/clientpositive/spark/insert_into2.q.out @@ -412,6 +412,81 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@insert_into2 #### A masked pattern was here #### 50 +PREHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@insert_into2@ds=2 +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 where ds='2' +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 where ds='2' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into2 + Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into2 where ds='2' +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from insert_into2 where ds='2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +51 PREHOOK: query: DROP TABLE insert_into2 PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert_into2 diff --git a/ql/src/test/results/clientpositive/spark/stats3.q.out b/ql/src/test/results/clientpositive/spark/stats3.q.out index 2afb76e..cbd66e5 
100644 --- a/ql/src/test/results/clientpositive/spark/stats3.q.out +++ b/ql/src/test/results/clientpositive/spark/stats3.q.out @@ -88,8 +88,6 @@ Table Type: MANAGED_TABLE Table Parameters: COLUMN_STATS_ACCURATE true numFiles 1 - numRows 0 - rawDataSize 0 totalSize 11 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/stats3.q.out b/ql/src/test/results/clientpositive/stats3.q.out index 2afb76e..cbd66e5 100644 --- a/ql/src/test/results/clientpositive/stats3.q.out +++ b/ql/src/test/results/clientpositive/stats3.q.out @@ -88,8 +88,6 @@ Table Type: MANAGED_TABLE Table Parameters: COLUMN_STATS_ACCURATE true numFiles 1 - numRows 0 - rawDataSize 0 totalSize 11 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/tez/insert_into1.q.out b/ql/src/test/results/clientpositive/tez/insert_into1.q.out index b24b407..0e82691 100644 --- a/ql/src/test/results/clientpositive/tez/insert_into1.q.out +++ b/ql/src/test/results/clientpositive/tez/insert_into1.q.out @@ -495,6 +495,126 @@ POSTHOOK: Input: default@insert_into1 #### A masked pattern was here #### 1 a 2 b +PREHOOK: query: explain +insert into table insert_into1 values(1, 'abc') +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert into table insert_into1 values(1, 'abc') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: values__tmp__table__1 + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.insert_into1 + +PREHOOK: query: insert into table insert_into1 values(1, 'abc') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: insert into table insert_into1 values(1, 'abc') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: insert_into1.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 
2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into1 + Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +3 PREHOOK: query: DROP TABLE insert_into1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert_into1 -- 1.7.12.4 (Apple Git-37)
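
A minimal, self-contained sketch of the invariant this patch enforces, for readers skimming the diff: when hive.stats.autogather=false, an insert marks basic stats stale (COLUMN_STATS_ACCURATE=false) on the loaded table/partition, and the metadata-only optimizer refuses to answer COUNT(*) from metastore stats that are not marked accurate, falling back to a real scan. This uses plain java.util.Map stand-ins for Hive table/partition parameters; the class and method names below are illustrative only, not Hive's API.

import java.util.HashMap;
import java.util.Map;

// Model (not Hive's actual classes) of the behavior added in Hive.loadTable/loadPartition
// and checked in StatsOptimizer.getRowCnt.
public class StatsInvalidationSketch {
    static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";
    static final String ROW_COUNT = "numRows";

    // After loading data: if stats auto-gathering is off, flag basic stats as stale.
    static void afterInsert(Map<String, String> params, boolean statsAutogather) {
        if (!statsAutogather) {
            params.put(COLUMN_STATS_ACCURATE, "false");
        }
    }

    // Only trust numRows when stats are marked accurate; otherwise signal "unknown"
    // so the planner schedules an actual scan instead of a metadata-only answer.
    static Long rowCountFromStats(Map<String, String> params) {
        if (!"true".equalsIgnoreCase(params.get(COLUMN_STATS_ACCURATE))) {
            return null;
        }
        String rowCnt = params.get(ROW_COUNT);
        if (rowCnt == null) {
            return null;
        }
        return Long.parseLong(rowCnt);
    }

    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put(COLUMN_STATS_ACCURATE, "true");
        params.put(ROW_COUNT, "2");
        afterInsert(params, /* hive.stats.autogather = */ false);
        System.out.println(rowCountFromStats(params)); // null -> COUNT(*) must scan the table
    }
}

This mirrors why the insert_into1/insert_into2 golden files change from a metadata-only Fetch plan to a full MapReduce/Tez/Spark aggregation once autogather is disabled.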