diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 0d6aaae535..d5d07526ca 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -569,6 +569,7 @@ minillaplocal.query.files=\
   input16_cc.q,\
   insert_after_drop_partition.q,\
   insert_dir_distcp.q,\
+  insert_into1_default.q,\
   insert_into_with_schema.q,\
   insert_values_orig_table.q,\
   insert_values_orig_table_use_metadata.q,\
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 26f20f2e05..e50f1ff412 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -18,31 +18,12 @@
 package org.apache.hadoop.hive.ql.parse;
 
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.Serializable;
-import java.security.AccessControlException;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Queue;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
-
+import com.google.common.base.Splitter;
+import com.google.common.base.Strings;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import com.google.common.math.IntMath;
+import com.google.common.math.LongMath;
 import org.antlr.runtime.ClassicToken;
 import org.antlr.runtime.CommonToken;
 import org.antlr.runtime.Token;
@@ -70,62 +51,18 @@
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.Context;
-import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.QueryProperties;
-import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.*;
 import org.apache.hadoop.hive.ql.cache.results.CacheUsage;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
-import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
-import org.apache.hadoop.hive.ql.exec.ColumnInfo;
-import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
-import org.apache.hadoop.hive.ql.exec.FetchTask;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
-import org.apache.hadoop.hive.ql.exec.FilterOperator;
-import org.apache.hadoop.hive.ql.exec.FunctionInfo;
-import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
-import org.apache.hadoop.hive.ql.exec.GroupByOperator;
-import org.apache.hadoop.hive.ql.exec.JoinOperator;
-import org.apache.hadoop.hive.ql.exec.LimitOperator;
-import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorFactory;
-import org.apache.hadoop.hive.ql.exec.RecordReader;
-import org.apache.hadoop.hive.ql.exec.RecordWriter;
-import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
-import org.apache.hadoop.hive.ql.exec.RowSchema;
-import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.SelectOperator;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.UnionOperator;
-import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.*;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
-import org.apache.hadoop.hive.ql.io.AcidInputFormat;
-import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.*;
 import org.apache.hadoop.hive.ql.io.AcidUtils.Operation;
-import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.ql.io.NullRowsInputFormat;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
 import org.apache.hadoop.hive.ql.lib.GraphWalker;
@@ -133,18 +70,9 @@
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
-import org.apache.hadoop.hive.ql.metadata.CheckConstraint;
-import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
-import org.apache.hadoop.hive.ql.metadata.DummyPartition;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
-import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
+import org.apache.hadoop.hive.ql.metadata.*;
 import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.Optimizer;
 import org.apache.hadoop.hive.ql.optimizer.QueryPlanPostProcessor;
 import org.apache.hadoop.hive.ql.optimizer.Transform;
@@ -157,95 +85,30 @@
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec.SpecType;
 import org.apache.hadoop.hive.ql.parse.CalcitePlanner.ASTSearcher;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PTFInputSpec;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PTFQueryInputSpec;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PTFQueryInputType;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression;
+import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.*;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionSpec;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionedTableFunctionSpec;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitioningSpec;
 import org.apache.hadoop.hive.ql.parse.QBSubQuery.SubQueryType;
 import org.apache.hadoop.hive.ql.parse.SubQueryUtils.ISubQueryJoinInfo;
-import org.apache.hadoop.hive.ql.parse.WindowingSpec.BoundarySpec;
-import org.apache.hadoop.hive.ql.parse.WindowingSpec.Direction;
-import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowExpressionSpec;
-import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFrameSpec;
-import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec;
-import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowSpec;
-import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowType;
-import org.apache.hadoop.hive.ql.plan.AggregationDesc;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
+import org.apache.hadoop.hive.ql.parse.WindowingSpec.*;
+import org.apache.hadoop.hive.ql.plan.*;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
-import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
-import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
-import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.FilterDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc;
-import org.apache.hadoop.hive.ql.plan.ForwardDesc;
-import org.apache.hadoop.hive.ql.plan.GroupByDesc;
-import org.apache.hadoop.hive.ql.plan.HiveOperation;
-import org.apache.hadoop.hive.ql.plan.InsertTableDesc;
-import org.apache.hadoop.hive.ql.plan.JoinCondDesc;
-import org.apache.hadoop.hive.ql.plan.JoinDesc;
-import org.apache.hadoop.hive.ql.plan.LateralViewForwardDesc;
-import org.apache.hadoop.hive.ql.plan.LateralViewJoinDesc;
-import org.apache.hadoop.hive.ql.plan.LimitDesc;
-import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
-import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
-import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
-import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.PTFDesc;
-import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.ScriptDesc;
-import org.apache.hadoop.hive.ql.plan.SelectDesc;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-import org.apache.hadoop.hive.ql.plan.UDTFDesc;
-import org.apache.hadoop.hive.ql.plan.UnionDesc;
 import org.apache.hadoop.hive.ql.plan.ptf.OrderExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PTFExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.ResourceType;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.*;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCardinalityViolation;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
 import org.apache.hadoop.hive.ql.util.ResourceDownloader;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
-import org.apache.hadoop.hive.serde2.NoOpFetchFormatter;
-import org.apache.hadoop.hive.serde2.NullStructSerDe;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeUtils;
+import org.apache.hadoop.hive.serde2.*;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2;
-import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.*;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.thrift.ThriftJDBCBinarySerDe;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
@@ -260,12 +123,16 @@
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
 
-import com.google.common.base.Splitter;
-import com.google.common.base.Strings;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-import com.google.common.math.IntMath;
-import com.google.common.math.LongMath;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.Serializable;
+import java.security.AccessControlException;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
+
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
 
 /**
  * Implementation of the semantic analyzer. It generates the query plan.
@@ -615,8 +482,150 @@ public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias,
     return aggregationTrees;
   }
 
+  private boolean isInsertInto(QBParseInfo qbp, String dest) {
+    // get the destination and check if it is TABLE
+    ASTNode destNode = qbp.getDestForClause(dest);
+    if (destNode != null && destNode.getType() == HiveParser.TOK_TAB) {
+      return true;
+      //TODO: TOK_PARTITION
+      //String fullTableName = getUnescapedName((ASTNode)destNode.getChild(0));
+      //if(qbp.isInsertIntoTable(fullTableName)){
+      //  return true;
+      //}
+    }
+    return false;
+  }
+
+  // Matches the SELECT produced by rewriting "INSERT ... VALUES (...)", i.e. a
+  // single select expression of the form inline(array(struct(...), ...)).
+  private boolean isValueClause(ASTNode select) {
+    if (select.getChildCount() == 1) {
+      ASTNode selectExpr = (ASTNode) select.getChild(0);
+      if (selectExpr.getChildCount() == 1) {
+        ASTNode selectChildExpr = (ASTNode) selectExpr.getChild(0);
+        if (selectChildExpr.getType() == HiveParser.TOK_FUNCTION) {
+          ASTNode inline = (ASTNode) selectChildExpr.getChild(0);
+          ASTNode func = (ASTNode) selectChildExpr.getChild(1);
+          if (inline.getText().equals("inline") && func.getType() == HiveParser.TOK_FUNCTION) {
+            ASTNode arrayNode = (ASTNode) func.getChild(0);
+            ASTNode funcNode = (ASTNode) func.getChild(1);
+            if (arrayNode.getText().equals("array") && funcNode.getType() == HiveParser.TOK_FUNCTION) {
+              return true;
+            }
+          }
+        }
+      }
+    }
+    return false;
+  }
+
+  // Returns the enabled DEFAULT constraint values of the target table, one
+  // entry per target column (null when a column has no default).
+  private List<String> getDefaultConstraints(Table tbl, List<String> targetSchema) throws SemanticException {
+    Map<String, String> colNameToDefaultVal = null;
+    try {
+      DefaultConstraint dc = Hive.get().getEnabledDefaultConstraints(tbl.getDbName(), tbl.getTableName());
+      colNameToDefaultVal = dc.getColNameToDefaultValueMap();
+    } catch (Exception e) {
+      if (e instanceof SemanticException) {
+        throw (SemanticException) e;
+      } else {
+        throw new RuntimeException(e);
+      }
+    }
+    List<String> defaultConstraints = new ArrayList<>();
+    if (targetSchema != null) {
+      for (String colName : targetSchema) {
+        if (colNameToDefaultVal.containsKey(colName)) {
+          defaultConstraints.add(colNameToDefaultVal.get(colName));
+        } else {
+          defaultConstraints.add(null);
+        }
+      }
+    } else {
+      for (FieldSchema fs : tbl.getCols()) {
+        if (colNameToDefaultVal.containsKey(fs.getName())) {
+          defaultConstraints.add(colNameToDefaultVal.get(fs.getName()));
+        } else {
+          defaultConstraints.add(null);
+        }
+      }
+    }
+    return defaultConstraints;
+  }
+
+  // Builds the AST node a DEFAULT keyword is replaced with: the parsed default
+  // value expression, or TOK_NULL when the column has no default constraint.
+  private ASTNode getNodeReplacementforDefault(String newValue) throws SemanticException {
+    ASTNode newNode = null;
+    if (newValue == null) {
+      newNode = ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node();
+    } else {
+      ParseDriver parseDriver = new ParseDriver();
+      try {
+        newNode = parseDriver.parseExpression(newValue);
+      } catch (Exception e) {
+        throw new SemanticException("Error while parsing default value for DEFAULT keyword: " + newValue
+            + ". Error message: " + e.getMessage());
+      }
+    }
+    return newNode;
+  }
+
+  private void replaceDefaultKeywordForUpdate(ASTNode selectExprs, Table targetTable) throws SemanticException {
+    List<String> defaultConstraints = null;
+    // skip the first child, since it will be the ROW__ID
+    for (int i = 1; i < selectExprs.getChildCount(); i++) {
+      ASTNode selectExpr = (ASTNode) selectExprs.getChild(i);
+      if (selectExpr.getChildCount() == 1 && selectExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL) {
+        if (selectExpr.getChild(0).getChild(0).getText().toLowerCase().equals("default")) {
+          if (defaultConstraints == null) {
+            defaultConstraints = getDefaultConstraints(targetTable, null);
+          }
+          ASTNode newNode = getNodeReplacementforDefault(defaultConstraints.get(i - 1));
+          // replace the node in place
+          selectExpr.replaceChildren(0, 0, newNode);
+        }
+      }
+    }
+  }
+
+  private void replaceDefaultKeyword(ASTNode valueArrClause, Table targetTable, List<String> targetSchema) throws SemanticException {
+    List<String> defaultConstraints = null;
+    // child 0 of the struct call is the function name; children 1..n are the row's values
+    for (int i = 1; i < valueArrClause.getChildCount(); i++) {
+      ASTNode valueClause = (ASTNode) valueArrClause.getChild(i);
+      if (valueClause.getType() == HiveParser.TOK_TABLE_OR_COL
+          && valueClause.getChild(0).getText().toLowerCase().equals("default")) {
+        if (defaultConstraints == null) {
+          defaultConstraints = getDefaultConstraints(targetTable, targetSchema);
+        }
+        ASTNode newNode = getNodeReplacementforDefault(defaultConstraints.get(i - 1));
+        // replace the node in place
+        valueArrClause.replaceChildren(i, i, newNode);
+      }
+    }
+  }
@@ ... @@
       LinkedHashMap<String, ASTNode> aggregations = doPhase1GetAggregationsFromSelect(ast, qb, ctx_1.dest);
-      doPhase1GetColumnAliasesFromSelect(ast, qbp);
+      doPhase1GetColumnAliasesFromSelect(ast, qbp, ctx_1.dest);
       qbp.setAggregationExprsForClause(ctx_1.dest, aggregations);
       qbp.setDistinctFuncExprsForClause(ctx_1.dest, doPhase1GetDistinctFuncExprs(aggregations));
@@ -12004,7 +12013,7 @@ void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticExce
       fetchTask = pCtx.getFetchTask();
     }
     //find all Acid FileSinkOperatorS
-    QueryPlanPostProcessor qp = new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
+    QueryPlanPostProcessor qp = new QueryPlanPostProcessor((List<Task<? extends Serializable>>) rootTasks, acidFileSinks, ctx.getExecutionId());
     LOG.info("Completed plan generation");
 
     // 10. put accessed columns to readEntity
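
Note on the shape these helpers walk: Hive analyzes INSERT ... VALUES by rewriting it into a select over a dummy row of the form inline(array(struct(...), ...)); isValueClause() detects that shape and replaceDefaultKeyword() patches the struct arguments. A minimal SQL sketch of the rewrite (the inline/array/struct shape is taken from the EXPLAIN plans in the golden output below; the rewritten statement text is illustrative only, not Hive's literal internal form):

    -- user query, on a table whose column "value" has no DEFAULT constraint:
    INSERT INTO insert_into1 VALUES (2, DEFAULT);

    -- is analyzed roughly as if written:
    INSERT INTO insert_into1
    SELECT inline(array(struct(2, NULL)));
    -- each DEFAULT token inside struct(...) is replaced with the column's
    -- default-constraint expression, or NULL when the column has none
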
diff --git a/ql/src/test/queries/clientpositive/insert_into1_default.q b/ql/src/test/queries/clientpositive/insert_into1_default.q
new file mode 100644
index 0000000000..6b69d30497
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_into1_default.q
@@ -0,0 +1,95 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+-- SORT_QUERY_RESULTS
+
+DROP TABLE insert_into1;
+
+-- No default constraint
+CREATE TABLE insert_into1 (key int, value string)
+  clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- should be able to use any case for DEFAULT
+EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- multi values
+explain insert into insert_into1 values(default, 3),(2,default);
+insert into insert_into1 values(default, 3),(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- with column schema
+EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
+INSERT INTO TABLE insert_into1(key) values(default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
+INSERT INTO TABLE insert_into1(key, value) values(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+DROP TABLE insert_into1;
+
+-- with default constraint
+CREATE TABLE insert_into1 (key int DEFAULT 1, value string)
+  clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1 values(default, DEFAULT);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- should be able to use any case for DEFAULT
+EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1 values(234, dEfAULt);
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- multi values
+explain insert into insert_into1 values(default, 3),(2,default);
+insert into insert_into1 values(default, 3),(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+-- with column schema
+EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
+INSERT INTO TABLE insert_into1(key) values(default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
+INSERT INTO TABLE insert_into1(key, value) values(2,default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+
+EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
+INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
+select * from insert_into1;
+TRUNCATE table insert_into1;
+DROP TABLE insert_into1;
+
+
+-- UPDATE
+CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+  clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+INSERT INTO insert_into1 values(2,1, 45);
+EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1;
+UPDATE insert_into1 set key = DEFAULT where value=1;
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+INSERT INTO insert_into1 values(2,1, 45);
+EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
+UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
+SELECT * from insert_into1;
+TRUNCATE table insert_into1;
+
+DROP TABLE insert_into1;
diff --git a/ql/src/test/results/clientpositive/llap/insert_into1_default.q.out b/ql/src/test/results/clientpositive/llap/insert_into1_default.q.out
new file mode 100644
index 0000000000..8447ac5aa4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_into1_default.q.out
@@ -0,0 +1,1760 @@
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+  clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+  clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(null,null)) (type: array<struct<col1:void,col2:void>>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: null (type: void)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: void)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: UDFToInteger(VALUE._col0) (type: int), UDFToString(VALUE._col0) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION []
+POSTHOOK: Lineage: insert_into1.value EXPRESSION []
+PREHOOK: query: SELECT * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+NULL	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(234,null)) (type: array<struct<col1:int,col2:void>>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: void)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: void)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value EXPRESSION []
+PREHOOK: query: SELECT * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+234	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: explain insert into insert_into1 values(default, 3),(2,default)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into insert_into1 values(default, 3),(2,default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(null,3),const struct(2,null)) (type: array<struct<col1:int,col2:int>>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), col2 (type: int)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: insert into insert_into1 values(default, 3),(2,default) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: insert into insert_into1 values(default, 3),(2,default) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value SCRIPT [] +PREHOOK: query: select * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +2 NULL +NULL 3 +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: null (type: void), null (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: void), _col1 (type: string) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + 
compressed: false + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1(key) values(default) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1(key) values(default) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key EXPRESSION [] +POSTHOOK: Lineage: insert_into1.value SIMPLE [] +PREHOOK: query: select * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 
(CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(2,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), null (type: void) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: void) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default) +PREHOOK: type: QUERY +PREHOOK: 
Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value EXPRESSION [] +PREHOOK: query: select * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +2 NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: DROP TABLE insert_into1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@insert_into1 +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: DROP TABLE insert_into1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@insert_into1 +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string) + clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string) + clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(1,null)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), null (type: void) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: void) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + File Output 
Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: string) + outputColumnNames: key, value + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll') + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: struct), _col1 (type: struct) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.insert_into1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: key, value + Column Types: int, string + Table: default.insert_into1 + +PREHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: INSERT INTO TABLE insert_into1 values(default, DEFAULT) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@insert_into1 +POSTHOOK: Lineage: insert_into1.key SCRIPT [] +POSTHOOK: Lineage: insert_into1.value EXPRESSION [] +PREHOOK: query: SELECT * from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +1 NULL +PREHOOK: query: TRUNCATE table insert_into1 +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@insert_into1 +POSTHOOK: query: TRUNCATE table insert_into1 +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@insert_into1 +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 
(CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(234,null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: void)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: void)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1 values(234, dEfAULt)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value EXPRESSION []
+PREHOOK: query: SELECT * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+234	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
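The run above exercises the first DEFAULT case: the analyzer recognizes the keyword case-insensitively (hence dEfAULt) and folds it to a constant before the Tez plan is built, so a column without a DEFAULT constraint receives null. A minimal sketch of the behaviour, using a hypothetical table t in place of the insert_into1 created earlier in the test:

    -- t is illustrative: key carries a default constraint, value does not
    CREATE TABLE t (key int DEFAULT 1, value string);
    INSERT INTO TABLE t values(234, dEfAULt);  -- DEFAULT for value folds to NULL
    SELECT * FROM t;                           -- 234	NULL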
+PREHOOK: query: explain insert into insert_into1 values(default, 3),(2,default)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into insert_into1 values(default, 3),(2,default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(1,3),const struct(2,null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), col2 (type: int)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: insert into insert_into1 values(default, 3),(2,default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: insert into insert_into1 values(default, 3),(2,default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SCRIPT []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	3
+2	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key) values(default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(1)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: string)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1(key) values(default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1(key) values(default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SIMPLE []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
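The two runs above show that DEFAULT is substituted per row and per column at compile time: the plan's Values clause is already array(const struct(1,3),const struct(2,null)), i.e. key's declared default 1 and a null for the unconstrained value. With a partial target list, the omitted column is likewise filled with null. A sketch, again against the hypothetical t:

    INSERT INTO t values(default, 3),(2,default);  -- rows (1, '3') and (2, NULL)
    INSERT INTO TABLE t(key) values(default);      -- row (1, NULL); value not listed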
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(2,null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: void)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: void)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value EXPRESSION []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+2	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
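As the plan shows, naming a column in the target list and then passing DEFAULT is equivalent to inserting the folded constant directly; nothing about the DEFAULT keyword survives past semantic analysis. A sketch against the hypothetical t:

    EXPLAIN INSERT INTO TABLE t(key, value) values(2, default);
    -- the Values clause is planned as: array(const struct(2,null))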
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct(2,null),const struct(1,null)) (type: array>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Select Operator
+                        expressions: col1 (type: int), null (type: void)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: int), _col1 (type: void)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), UDFToString(VALUE._col1) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: INSERT
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string)
+                  outputColumnNames: key, value
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: struct), _col1 (type: struct)
+        Reducer 3
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: INSERT
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.insert_into1
+
+PREHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value EXPRESSION []
+PREHOOK: query: select * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	NULL
+2	NULL
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+ clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+ clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.i SCRIPT []
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SCRIPT []
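The remaining cases cover UPDATE ... SET col = DEFAULT, which requires an ACID (transactional) table such as the one just created. The planner again rewrites DEFAULT into the column's constraint value, so the Reducer's Select in the plan below projects the constant 1 rather than any runtime lookup. A sketch, assuming a hypothetical transactional table t2:

    -- t2 is illustrative; 'transactional'='true' is required for UPDATE
    CREATE TABLE t2 (key int DEFAULT 1, value string, i int)
      clustered by (i) into 2 buckets stored as orc
      TBLPROPERTIES ('transactional'='true');
    UPDATE t2 SET key = DEFAULT WHERE value = 1;  -- key := 1, its declared default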
+PREHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into1
+                  Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (value = 1) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: ROW__ID (type: struct), value (type: string), i (type: int)
+                      outputColumnNames: _col0, _col2, _col3
+                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: struct)
+                        sort order: +
+                        Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string), _col3 (type: int)
+            Execution mode: llap
+            LLAP IO: may be used (ACID table)
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: UPDATE
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: UPDATE
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: UPDATE insert_into1 set key = DEFAULT where value=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: UPDATE insert_into1 set key = DEFAULT where value=1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: SELECT * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	1	45
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO insert_into1 values(2,1, 45)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.i SCRIPT []
+POSTHOOK: Lineage: insert_into1.key SCRIPT []
+POSTHOOK: Lineage: insert_into1.value SCRIPT []
+PREHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into1
+                  Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (value = 1) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: ROW__ID (type: struct), i (type: int)
+                      outputColumnNames: _col0, _col3
+                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: struct)
+                        sort order: +
+                        Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col3 (type: int)
+            Execution mode: llap
+            LLAP IO: may be used (ACID table)
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: struct), 1 (type: int), null (type: string), VALUE._col0 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                      name: default.insert_into1
+                  Write Type: UPDATE
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.insert_into1
+          Write Type: UPDATE
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: SELECT * from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+1	NULL	45
+PREHOOK: query: TRUNCATE table insert_into1
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: TRUNCATE table insert_into1
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
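The final run sets both columns to DEFAULT in a single UPDATE: key reverts to its declared default 1, while value, having no constraint, is planned as null (the Reducer projects 1 (type: int), null (type: string)). A closing sketch with the hypothetical t2:

    UPDATE t2 SET key = DEFAULT, value = DEFAULT WHERE value = 1;
    SELECT * FROM t2;  -- 1	NULL	45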