diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 5985dcfab9..e6478328ef 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -569,6 +569,7 @@ minillaplocal.query.files=\
   input16_cc.q,\
   insert_after_drop_partition.q,\
   insert_dir_distcp.q,\
+  insert_into_default_keyword.q,\
   insert_into_with_schema.q,\
   insert_values_orig_table.q,\
   insert_values_orig_table_use_metadata.q,\
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 26f20f2e05..84c6d6eb9d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -615,8 +615,189 @@ public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias,
     return aggregationTrees;
   }
 
+  /**
+   * This method figures out if the current AST is for an INSERT INTO.
+   * @param qbp qbParseInfo
+   * @param dest destination clause
+   * @return true or false
+   */
+  private boolean isInsertInto(QBParseInfo qbp, String dest) {
+    // get the destination and check if it is TABLE
+    if(qbp == null || dest == null) return false;
+    ASTNode destNode = qbp.getDestForClause(dest);
+    if(destNode != null && destNode.getType() == HiveParser.TOK_TAB) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Given an AST, this method figures out if it is a values clause,
+   * e.g. VALUES(1,3,...)
+   */
+  private boolean isValueClause(ASTNode select) {
+    if(select == null) return false;
+    if(select.getChildCount() == 1) {
+      ASTNode selectExpr = (ASTNode)select.getChild(0);
+      if(selectExpr.getChildCount() == 1) {
+        ASTNode selectChildExpr = (ASTNode)selectExpr.getChild(0);
+        if(selectChildExpr.getType() == HiveParser.TOK_FUNCTION) {
+          ASTNode inline = (ASTNode)selectChildExpr.getChild(0);
+          ASTNode func = (ASTNode)selectChildExpr.getChild(1);
+          if(inline.getText().equals("inline") && func.getType() == HiveParser.TOK_FUNCTION) {
+            ASTNode arrayNode = (ASTNode)func.getChild(0);
+            ASTNode funcNode = (ASTNode)func.getChild(1);
+            if(arrayNode.getText().equals("array") && funcNode.getType() == HiveParser.TOK_FUNCTION) {
+              return true;
+            }
+          }
+        }
+      }
+    }
+    return false;
+  }
+
+  /**
+   * This method creates a list of default constraints which corresponds to
+   * the given schema (targetSchema) or the target table's column schema (if targetSchema is null).
+   * @param tbl
+   * @param targetSchema
+   * @return list of default constraints (including null if there is no default)
+   * @throws SemanticException
+   */
+  private List<String> getDefaultConstraints(Table tbl, List<String> targetSchema) throws SemanticException{
+    Map<String, String> colNameToDefaultVal = null;
+    try {
+      DefaultConstraint dc = Hive.get().getEnabledDefaultConstraints(tbl.getDbName(), tbl.getTableName());
+      colNameToDefaultVal = dc.getColNameToDefaultValueMap();
+    } catch (Exception e) {
+      if (e instanceof SemanticException) {
+        throw (SemanticException) e;
+      } else {
+        throw (new RuntimeException(e));
+      }
+    }
+    List<String> defaultConstraints = new ArrayList<>();
+    if(targetSchema != null) {
+      for (String colName : targetSchema) {
+        if (colNameToDefaultVal.containsKey(colName)) {
+          defaultConstraints.add(colNameToDefaultVal.get(colName));
+        }
+        else {
+          defaultConstraints.add(null);
+        }
+      }
+    }
+    else {
+      for(FieldSchema fs : tbl.getCols()) {
+        if(colNameToDefaultVal.containsKey(fs.getName())) {
+          defaultConstraints.add(colNameToDefaultVal.get(fs.getName()));
+        }
+        else {
+          defaultConstraints.add(null);
+        }
+      }
+    }
+    return defaultConstraints;
+  }
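Note (reviewer annotation, not part of the patch): the null-padding above is what later makes a DEFAULT keyword on a column without a constraint resolve to NULL. A minimal, self-contained sketch of that alignment logic; the class and method names here are illustrative, not from the patch:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Illustrative stand-in for the alignment logic in getDefaultConstraints(); not patch code. */
public class DefaultAlignmentSketch {

  /**
   * Aligns default-constraint values with the columns a query actually targets.
   * A column without a DEFAULT constraint maps to null, which the patch later
   * turns into a TOK_NULL node.
   */
  static List<String> alignDefaults(Map<String, String> colNameToDefaultVal, List<String> targetSchema) {
    List<String> defaults = new ArrayList<>();
    for (String colName : targetSchema) {
      // keep the positional correspondence with targetSchema
      defaults.add(colNameToDefaultVal.getOrDefault(colName, null));
    }
    return defaults;
  }

  public static void main(String[] args) {
    Map<String, String> constraints = new HashMap<>();
    constraints.put("key", "1"); // as in: CREATE TABLE ... (key int DEFAULT 1, value string)
    System.out.println(alignDefaults(constraints, Arrays.asList("key", "value"))); // [1, null]
    System.out.println(alignDefaults(constraints, Arrays.asList("value", "key"))); // [null, 1]
  }
}
```

The second call mirrors the `insert_into1(value, key)` test below: the defaults follow the user-specified column order, not the table order.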
+
+  /**
+   * Constructs an AST for the given DEFAULT value string.
+   * @param newValue
+   * @throws SemanticException
+   */
+  private ASTNode getNodeReplacementforDefault(String newValue) throws SemanticException {
+    ASTNode newNode = null;
+    if(newValue == null) {
+      newNode = ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node();
+    }
+    else {
+      ParseDriver parseDriver = new ParseDriver();
+      try {
+        newNode = parseDriver.parseExpression(newValue);
+      } catch(Exception e) {
+        throw new SemanticException("Error while parsing default value for DEFAULT keyword: " + newValue
+            + ". Error message: " + e.getMessage());
+      }
+    }
+    return newNode;
+  }
+
+  /**
+   * This method replaces the ASTNode corresponding to the DEFAULT keyword with the DEFAULT
+   * constraint expression if one exists, or NULL otherwise.
+   * @param selectExprs
+   * @param targetTable
+   * @throws SemanticException
+   */
+  private void replaceDefaultKeywordForUpdate(ASTNode selectExprs, Table targetTable) throws SemanticException{
+    List<String> defaultConstraints = null;
+    for(int i = 1; i < selectExprs.getChildCount(); i++) {
+      // skip the first child since it will be ROW__ID
+      ASTNode selectExpr = (ASTNode)selectExprs.getChild(i);
+      if(selectExpr.getChildCount() == 1 && selectExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL) {
+        if(selectExpr.getChild(0).getChild(0).getText().toLowerCase().equals("default")) {
+          if(defaultConstraints == null) {
+            defaultConstraints = getDefaultConstraints(targetTable, null);
+          }
+          ASTNode newNode = getNodeReplacementforDefault(defaultConstraints.get(i - 1));
+          // replace the node in place
+          selectExpr.replaceChildren(0, 0, newNode);
+        }
+      }
+    }
+  }
+
+  /**
+   * This method replaces the DEFAULT AST node with the DEFAULT expression.
+   * @param valueArrClause This is the AST for the values clause
+   * @param targetTable
+   * @param targetSchema this is the target schema/column schema if specified in the query
+   * @throws SemanticException
+   */
+  private void replaceDefaultKeyword(ASTNode valueArrClause, Table targetTable, List<String> targetSchema) throws SemanticException{
+    List<String> defaultConstraints = null;
+    for(int i = 1; i < valueArrClause.getChildCount(); i++) {
+      ASTNode valueClause = (ASTNode)valueArrClause.getChild(i);
+      // the first child of each struct function is "struct", so column j maps to child j
+      for(int j = 1; j < valueClause.getChildCount(); j++) {
+        ASTNode valueNode = (ASTNode)valueClause.getChild(j);
+        if(valueNode.getType() == HiveParser.TOK_TABLE_OR_COL
+            && valueNode.getChild(0).getText().toLowerCase().equals("default")) {
+          if(defaultConstraints == null) {
+            defaultConstraints = getDefaultConstraints(targetTable, targetSchema);
+          }
+          ASTNode newNode = getNodeReplacementforDefault(defaultConstraints.get(j - 1));
+          // replace the node in place
+          valueClause.replaceChildren(j, j, newNode);
+        }
+      }
+    }
+  }
@@ ... @@
       HashMap<String, ASTNode> aggregations = doPhase1GetAggregationsFromSelect(ast, qb, ctx_1.dest);
-      doPhase1GetColumnAliasesFromSelect(ast, qbp);
+      doPhase1GetColumnAliasesFromSelect(ast, qbp, ctx_1.dest);
       qbp.setAggregationExprsForClause(ctx_1.dest, aggregations);
       qbp.setDistinctFuncExprsForClause(ctx_1.dest, doPhase1GetDistinctFuncExprs(aggregations));
@@ -12004,7 +12185,7 @@ void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticExce
       fetchTask = pCtx.getFetchTask();
     }
     //find all Acid FileSinkOperatorS
-    QueryPlanPostProcessor qp = new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
+    QueryPlanPostProcessor qp = new QueryPlanPostProcessor((List<Task<?>>) rootTasks, acidFileSinks, ctx.getExecutionId());
     LOG.info("Completed plan generation");
 
     // 10. put accessed columns to readEntity
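Note (reviewer annotation, not part of the patch): taken together, getDefaultConstraints, getNodeReplacementforDefault, and replaceDefaultKeyword rewrite each VALUES row positionally: a case-insensitive DEFAULT token is replaced by the column's default expression when one exists, and by NULL otherwise. A self-contained, string-level sketch of that substitution, with hypothetical names:

```java
import java.util.Arrays;
import java.util.List;

/** String-level sketch of the DEFAULT substitution done on the VALUES AST; not patch code. */
public class DefaultSubstitutionSketch {

  /** Replaces the (case-insensitive) DEFAULT keyword in one VALUES row, position by position. */
  static List<String> substituteRow(List<String> row, List<String> defaults) {
    String[] out = new String[row.size()];
    for (int j = 0; j < row.size(); j++) {
      String v = row.get(j);
      // mirrors valueNode.getChild(0).getText().toLowerCase().equals("default")
      if (v.equalsIgnoreCase("default")) {
        String dflt = defaults.get(j);
        out[j] = (dflt == null) ? "NULL" : dflt; // a missing default becomes TOK_NULL in the patch
      } else {
        out[j] = v;
      }
    }
    return Arrays.asList(out);
  }

  public static void main(String[] args) {
    List<String> defaults = Arrays.asList("1", null); // key int DEFAULT 1, value string
    System.out.println(substituteRow(Arrays.asList("default", "3"), defaults)); // [1, 3]
    System.out.println(substituteRow(Arrays.asList("2", "dEfAULt"), defaults)); // [2, NULL]
  }
}
```

Run against the `key int DEFAULT 1, value string` table from the tests below, this reproduces the expected rows `1 3` and `2 NULL` for the multi-values case.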
diff --git a/ql/src/test/queries/clientpositive/insert_into_default_keyword.q b/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
new file mode 100644
index 0000000000..14f91fe3b7
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
@@ -0,0 +1,116 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+-- SORT_QUERY_RESULTS
+
+DROP TABLE insert_into1;
+
+-- No default constraint
+CREATE TABLE insert_into1 (key int, value string)
+    clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- should be able to use any case for DEFAULT
+EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- multi values
+explain insert into insert_into1 values(default, 3),(2,default);
+insert into insert_into1 values(default, 3),(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- with column schema
+EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
+INSERT INTO TABLE insert_into1(key) values(default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
+INSERT INTO TABLE insert_into1(key, value) values(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+DROP TABLE insert_into1;
+
+-- with default constraint
+CREATE TABLE insert_into1 (key int DEFAULT 1, value string)
+    clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- should be able to use any case for DEFAULT
+EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- multi values
+explain insert into insert_into1 values(default, 3),(2,default);
+insert into insert_into1 values(default, 3),(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- with column schema
+EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
+INSERT INTO TABLE insert_into1(key) values(default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
+INSERT INTO TABLE insert_into1(key, value) values(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+EXPLAIN INSERT INTO TABLE insert_into1(value, key) values(2,default);
+INSERT INTO TABLE insert_into1(value, key) values(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
+INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+DROP TABLE insert_into1;
+
+
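Note (reviewer annotation, not part of the patch): the UPDATE cases below exercise replaceDefaultKeywordForUpdate, where the rewritten select list carries ROW__ID at position 0, which is why the patch looks up defaultConstraints.get(i-1). A small sketch of that offset, with hypothetical names:

```java
import java.util.Arrays;
import java.util.List;

/**
 * Illustrative sketch (not patch code) of the index shift in replaceDefaultKeywordForUpdate():
 * in a rewritten UPDATE, select expression 0 is ROW__ID, so expression i maps to table column i-1.
 */
public class UpdateDefaultOffsetSketch {

  /** Maps a select-expression position in the rewritten UPDATE to its target column. */
  static String targetColumnFor(int selectExprIndex, List<String> tableColumns) {
    if (selectExprIndex == 0) {
      return "ROW__ID"; // synthetic column; never carries a DEFAULT
    }
    return tableColumns.get(selectExprIndex - 1);
  }

  public static void main(String[] args) {
    // UPDATE insert_into1 SET key = DEFAULT ... is planned as SELECT ROW__ID, key, value, i ...
    List<String> cols = Arrays.asList("key", "value", "i");
    System.out.println(targetColumnFor(0, cols)); // ROW__ID
    System.out.println(targetColumnFor(1, cols)); // key -> DEFAULT resolves against column "key"
  }
}
```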
+-- UPDATE
+CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+    clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+INSERT INTO insert_into1 values(2,1, 45);
+EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1;
+UPDATE insert_into1 set key = DEFAULT where value=1;
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+INSERT INTO insert_into1 values(2,1, 45);
+EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
+UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+DROP TABLE insert_into1;
+
+-- partitioned table
+CREATE TABLE tpart(i int, j int DEFAULT 1001) partitioned by (ds string);
+-- no column schema
+EXPLAIN INSERT INTO tpart partition(ds='1') values(DEFAULT, DEFAULT);
+INSERT INTO tpart partition(ds='1') values(DEFAULT, DEFAULT);
+SELECT * FROM tpart;
+TRUNCATE table tpart;
+-- with column schema
+EXPLAIN INSERT INTO tpart partition(ds='1')(i) values(DEFAULT);
+INSERT INTO tpart partition(ds='1')(i) values(DEFAULT);
+EXPLAIN INSERT INTO tpart partition(ds='1')(i,j) values(10, DEFAULT);
+INSERT INTO tpart partition(ds='1')(i,j) values(10, DEFAULT);
+SELECT * FROM tpart;
+TRUNCATE table tpart;
+DROP TABLE tpart;
diff --git a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
new file mode 100644
index 0000000000..a8d8fd3b09
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
@@ -0,0 +1,2291 @@
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+    clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+    clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(null,null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: null (type: void)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: void)
+            Execution mode:
llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: UDFToInteger(VALUE._col0) (type: int), UDFToString(VALUE._col0) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [] +POSTHOOK: Lineage: insert_into1.value EXPRESSION [] +PREHOOK: query: SELECT * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO 
TABLE insert_into1 values(234, dEfAULt) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(234,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), null (type: void) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: void) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value EXPRESSION [] +PREHOOK: query: SELECT * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +234 NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: explain insert into insert_into1 values(default, 3),(2,default) +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into insert_into1 values(default, 3),(2,default) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(null,3),const struct(2,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), col2 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num 
rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: insert into insert_into1 values(default, 3),(2,default) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: insert into insert_into1 values(default, 3),(2,default) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value SCRIPT [] +PREHOOK: query: select * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +2 NULL +NULL 3 +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF 
Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: null (type: void), null (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: void), _col1 (type: string) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1(key) values(default) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1(key) values(default) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [] +POSTHOOK: Lineage: insert_into1.value SIMPLE [] +PREHOOK: query: select * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked 
pattern was here #### +POSTHOOK: query: select * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(2,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), null (type: void) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: void) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: 
Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value EXPRESSION [] +PREHOOK: query: select * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +2 NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: DROP TABLE insert_into1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@insert_into1 +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: DROP TABLE insert_into1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@insert_into1 +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string) + clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string) + clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(1,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: 
COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), null (type: void) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: void) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value EXPRESSION [] +PREHOOK: query: SELECT * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A 
masked pattern was here #### +POSTHOOK: query: SELECT * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +1 NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(234,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), null (type: void) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: void) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 
Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value EXPRESSION [] +PREHOOK: query: SELECT * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +234 NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: explain insert into insert_into1 values(default, 3),(2,default) +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into insert_into1 values(default, 3),(2,default) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(1,3),const struct(2,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), col2 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: 
COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: insert into insert_into1 values(default, 3),(2,default) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: insert into insert_into1 values(default, 3),(2,default) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value SCRIPT [] +PREHOOK: query: select * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +1 3 +2 NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here 
####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(1)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: string)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1(key) values(default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1(key) values(default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SIMPLE []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(2,null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: void)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: void)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value EXPRESSION []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+2	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(value, key) values(2,default)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(value, key) values(2,default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(2,1)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col2 (type: int), col1 (type: int)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1(value, key) values(2,default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1(value, key) values(2,default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SCRIPT []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	2
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(2,null),const struct(1,null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: void)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: void)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value EXPRESSION []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	NULL
+2	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+ clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+ clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.i SCRIPT []
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SCRIPT []
+PREHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into1
+                  Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (value = 1) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: ROW__ID (type: struct), value (type: string), i (type: int)
+                      outputColumnNames: _col0, _col2, _col3
+                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: struct)
+                        sort order: +
+                        Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string), _col3 (type: int)
+            Execution mode: llap
+            LLAP IO: may be used (ACID table)
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: UPDATE
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: UPDATE
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: UPDATE insert_into1 set key = DEFAULT where value=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: UPDATE insert_into1 set key = DEFAULT where value=1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: SELECT * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	1	45
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.i SCRIPT []
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SCRIPT []
+PREHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into1
+                  Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (value = 1) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: ROW__ID (type: struct), i (type: int)
+                      outputColumnNames: _col0, _col3
+                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: struct)
+                        sort order: +
+                        Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col3 (type: int)
+            Execution mode: llap
+            LLAP IO: may be used (ACID table)
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), null (type: string), VALUE._col0 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: UPDATE
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: UPDATE
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: SELECT * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	NULL	45
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: CREATE TABLE tpart(i int, j int DEFAULT 1001) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tpart
+POSTHOOK: query: CREATE TABLE tpart(i int, j int DEFAULT 1001) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tpart
+PREHOOK: query: EXPLAIN INSERT INTO tpart partition(ds='1') values(DEFAULT, DEFAULT)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO tpart partition(ds='1') values(DEFAULT, DEFAULT)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(null,1001)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: null (type: int), col2 (type: int)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          table:
+                              input format: org.apache.hadoop.mapred.TextInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              name: default.tpart
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), '1' (type: string)
+                          outputColumnNames: i, j, ds
+                          Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
+                          Group By Operator
+                            aggregations: compute_stats(i, 'hll'), compute_stats(j, 'hll')
+                            keys: ds (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 1 Data size: 933 Basic stats: COMPLETE Column stats: COMPLETE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 1 Data size: 933 Basic stats: COMPLETE Column stats: COMPLETE
+                              value expressions: _col1 (type: struct), _col2 (type: struct)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: struct), _col2 (type: struct), _col0 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.tpart
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, j
+          Column Types: int, int
+          Table: default.tpart
+
+PREHOOK: query: INSERT INTO tpart partition(ds='1') values(DEFAULT, DEFAULT)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tpart@ds=1
+POSTHOOK: query: INSERT INTO tpart partition(ds='1') values(DEFAULT, DEFAULT)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tpart@ds=1
+POSTHOOK: Lineage: tpart PARTITION(ds=1).i EXPRESSION []
+POSTHOOK: Lineage: tpart PARTITION(ds=1).j SCRIPT []
+PREHOOK: query: SELECT * FROM tpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tpart
+PREHOOK: Input: default@tpart@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM tpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tpart
+POSTHOOK: Input: default@tpart@ds=1
+#### A masked pattern was here ####
+NULL	1001	1
+PREHOOK: query: TRUNCATE table tpart
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@tpart@ds=1
+POSTHOOK: query: TRUNCATE table tpart
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@tpart@ds=1
+PREHOOK: query: EXPLAIN INSERT INTO tpart partition(ds='1')(i) values(DEFAULT)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO tpart partition(ds='1')(i) values(DEFAULT)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: null (type: int), 1001 (type: int)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          table:
+                              input format: org.apache.hadoop.mapred.TextInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              name: default.tpart
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), '1' (type: string)
+                          outputColumnNames: i, j, ds
+                          Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                          Group By Operator
+                            aggregations: compute_stats(i, 'hll'), compute_stats(j, 'hll')
+                            keys: ds (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 1 Data size: 933 Basic stats: COMPLETE Column stats: COMPLETE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 1 Data size: 933 Basic stats: COMPLETE Column stats: COMPLETE
+                              value expressions: _col1 (type: struct), _col2 (type: struct)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: struct), _col2 (type: struct), _col0 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.tpart
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, j
+          Column Types: int, int
+          Table: default.tpart
+
+PREHOOK: query: INSERT INTO tpart partition(ds='1')(i) values(DEFAULT)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tpart@ds=1
+POSTHOOK: query: INSERT INTO tpart partition(ds='1')(i) values(DEFAULT)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tpart@ds=1
+POSTHOOK: Lineage: tpart PARTITION(ds=1).i EXPRESSION []
+POSTHOOK: Lineage: tpart PARTITION(ds=1).j SIMPLE []
+PREHOOK: query: EXPLAIN INSERT INTO tpart partition(ds='1')(i,j) values(10, DEFAULT)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO tpart partition(ds='1')(i,j) values(10, DEFAULT)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(10,1001)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), col2 (type: int)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          table:
+                              input format: org.apache.hadoop.mapred.TextInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              name: default.tpart
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), '1' (type: string)
+                          outputColumnNames: i, j, ds
+                          Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE
+                          Group By Operator
+                            aggregations: compute_stats(i, 'hll'), compute_stats(j, 'hll')
+                            keys: ds (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 1 Data size: 933 Basic stats: COMPLETE Column stats: COMPLETE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 1 Data size: 933 Basic stats: COMPLETE Column stats: COMPLETE
+                              value expressions: _col1 (type: struct), _col2 (type: struct)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: struct), _col2 (type: struct), _col0 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.tpart
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, j
+          Column Types: int, int
+          Table: default.tpart
+
+PREHOOK: query: INSERT INTO tpart partition(ds='1')(i,j) values(10, DEFAULT)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tpart@ds=1
+POSTHOOK: query: INSERT INTO tpart partition(ds='1')(i,j) values(10, DEFAULT)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tpart@ds=1
+POSTHOOK: Lineage: tpart PARTITION(ds=1).i SCRIPT []
+POSTHOOK: Lineage: tpart PARTITION(ds=1).j SCRIPT []
+PREHOOK: query: SELECT * FROM tpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tpart
+PREHOOK: Input: default@tpart@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM tpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tpart
+POSTHOOK: Input: default@tpart@ds=1
+#### A masked pattern was here ####
+10	1001	1
+NULL	1001	1
+PREHOOK: query: TRUNCATE table tpart
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@tpart@ds=1
+POSTHOOK: query: TRUNCATE table tpart
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@tpart@ds=1
+PREHOOK: query: DROP TABLE tpart
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tpart
+PREHOOK: Output: default@tpart
+POSTHOOK: query: DROP TABLE tpart
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tpart
+POSTHOOK: Output: default@tpart
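Taken together, the golden output above pins down the DEFAULT-keyword semantics this patch implements. A minimal HiveQL recap (illustrative only, not part of the patch; `t` is a hypothetical stand-in for the test tables above):

    -- DEFAULT resolves to the column's DEFAULT constraint, or to NULL when the column has none
    CREATE TABLE t (key int DEFAULT 1, value string);
    INSERT INTO TABLE t(key) VALUES (default);            -- row (1, NULL): constraint fills key, value defaults to NULL
    INSERT INTO TABLE t(key, value) VALUES (2, default);  -- row (2, NULL): value has no constraint
    INSERT INTO TABLE t(value, key) VALUES (2, default);  -- row (1, '2'): the target-column order is honored
    -- On a transactional table, UPDATE ... SET col = DEFAULT is rewritten the same way:
    --   UPDATE t SET key = DEFAULT, value = DEFAULT WHERE value = 1;   -- key -> 1, value -> NULL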