diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f2e927f9a2..a93f7decc4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1568,6 +1568,9 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
         "query will fail if hive.enforce.bucketmapjoin is set to true."),
+    HIVE_ENFORCE_NOT_NULL_CONSTRAINT("hive.constraint.notnull.enforce", true,
+        "Should \"IS NOT NULL\" constraint be enforced?"),
+
     HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false,
         "Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
     HIVE_AUTO_SORTMERGE_JOIN_REDUCE("hive.auto.convert.sortmerge.join.reduce.side", true,
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index bd2416f4eb..10d7dc3e95 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -538,6 +538,7 @@ minillaplocal.query.files=\
   dynpart_sort_opt_vectorization.q,\
   dynpart_sort_optimization.q,\
   dynpart_sort_optimization_acid.q,\
+  enforce_constraint_notnull.q,\
   escape1.q,\
   escape2.q,\
   exchgpartition2lel.q,\
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 68e6ae3cd5..bd5be3483a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -214,6 +214,7 @@
     system.registerUDF("rand", UDFRand.class, false);
     system.registerGenericUDF("abs", GenericUDFAbs.class);
     system.registerGenericUDF("sq_count_check", GenericUDFSQCountCheck.class);
+    system.registerGenericUDF("enforce_constraint", GenericUDFEnforceNotNullConstraint.class);
     system.registerGenericUDF("pmod", GenericUDFPosMod.class);
     system.registerUDF("ln", UDFLn.class, false);
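
The function registered above is later resolved by name when the planner builds the filter predicate. A minimal, self-contained sketch of that registry pattern (plain Java; ToyFunctionRegistry and its members are hypothetical stand-ins, not Hive API):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    // Toy stand-in for Hive's FunctionRegistry: a lowercase function name is
    // mapped to a factory for its implementation, so "enforce_constraint" in a
    // plan resolves to its UDF class at runtime.
    public class ToyFunctionRegistry {
      private final Map<String, Supplier<Object>> fns = new HashMap<>();

      void registerGenericUDF(String name, Supplier<Object> factory) {
        fns.put(name.toLowerCase(), factory);
      }

      Object lookup(String name) {
        Supplier<Object> f = fns.get(name.toLowerCase());
        if (f == null) {
          throw new IllegalArgumentException("Unknown function: " + name);
        }
        return f.get();
      }

      public static void main(String[] args) {
        ToyFunctionRegistry r = new ToyFunctionRegistry();
        r.registerGenericUDF("enforce_constraint", Object::new);
        System.out.println(r.lookup("ENFORCE_CONSTRAINT") != null); // prints true
      }
    }
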
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 1a2b3c1f6c..76522cb535 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -4769,6 +4769,30 @@ public NotNullConstraint getReliableNotNullConstraints(String dbName, String tbl
     return getNotNullConstraints(dbName, tblName, true);
   }
 
+  /**
+   * Get the not null constraints associated with the table that are enabled/enforced.
+   *
+   * @param dbName Database Name
+   * @param tblName Table Name
+   * @return Not null constraints associated with the table.
+   * @throws HiveException
+   */
+  public NotNullConstraint getEnabledNotNullConstraints(String dbName, String tblName)
+      throws HiveException {
+    try {
+      List<SQLNotNullConstraint> notNullConstraints = getMSC().getNotNullConstraints(
+          new NotNullConstraintsRequest(dbName, tblName));
+      if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+        notNullConstraints = notNullConstraints.stream()
+            .filter(nnc -> nnc.isEnable_cstr())
+            .collect(Collectors.toList());
+      }
+      return new NotNullConstraint(notNullConstraints, tblName, dbName);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
   private NotNullConstraint getNotNullConstraints(String dbName, String tblName,
       boolean onlyReliable) throws HiveException {
     try {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 2c3af5d130..720a43c09b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -778,8 +778,8 @@ private static void generateConstraintInfos(ASTNode child, List columnNa
         constraintName = unescapeIdentifier(grandChild.getChild(0).getText().toLowerCase());
       } else if (type == HiveParser.TOK_ENABLE) {
         enable = true;
-        // validate is true by default if we enable the constraint
-        validate = true;
+        // validate is false by default if we enable the constraint
+        validate = false;
       } else if (type == HiveParser.TOK_DISABLE) {
         enable = false;
         // validate is false by default if we disable the constraint
@@ -792,11 +792,7 @@ private static void generateConstraintInfos(ASTNode child, List columnNa
         rely = true;
       }
     }
-    if (enable) {
-      throw new SemanticException(
-          ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("ENABLE/ENFORCED feature not supported yet. "
-              + "Please use DISABLE/NOT ENFORCED instead."));
-    }
+
     if (validate) {
       throw new SemanticException(
           ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("VALIDATE feature not supported yet. "
              + "Please use NOVALIDATE instead."));
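
With ENABLE now accepted, the constraint characteristics resolve as follows: ENABLE implies enforcement with validate defaulting to false, DISABLE keeps validate false, and an explicit VALIDATE is still rejected. A self-contained sketch of that decision table (plain Java; names are hypothetical, not the patch's code):

    // Toy version of the ENABLE/DISABLE/VALIDATE/RELY resolution in
    // generateConstraintInfos after this patch.
    public class ConstraintFlagsSketch {
      static final class Flags {
        boolean enable;
        boolean validate;
        boolean rely;
      }

      static Flags resolve(boolean sawEnable, boolean sawValidate, boolean sawRely) {
        Flags f = new Flags();
        f.enable = sawEnable;   // ENABLE/ENFORCED no longer throws
        f.validate = false;     // the default for both ENABLE and DISABLE now
        if (sawValidate) {
          // still unsupported: the analyzer raises INVALID_CSTR_SYNTAX
          throw new IllegalArgumentException(
              "VALIDATE feature not supported yet. Please use NOVALIDATE instead.");
        }
        f.rely = sawRely;
        return f;
      }

      public static void main(String[] args) {
        Flags f = resolve(true, false, false); // NOT NULL ENFORCED
        System.out.println(f.enable + " " + f.validate); // true false
      }
    }
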
" diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index c558356a4e..29263db05e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -53,6 +53,7 @@ import org.antlr.runtime.tree.TreeWizard; import org.antlr.runtime.tree.TreeWizard.ContextVisitor; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.util.ImmutableBitSet; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -129,6 +130,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; +import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient; import org.apache.hadoop.hive.ql.metadata.Table; @@ -138,6 +140,7 @@ import org.apache.hadoop.hive.ql.optimizer.Transform; import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException; import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature; +import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTBuilder; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc; import org.apache.hadoop.hive.ql.optimizer.lineage.Generator; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; @@ -204,6 +207,7 @@ import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.UDTFDesc; import org.apache.hadoop.hive.ql.plan.UnionDesc; +import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.plan.ptf.OrderExpressionDef; import org.apache.hadoop.hive.ql.plan.ptf.PTFExpressionDef; import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef; @@ -6609,10 +6613,104 @@ private void setStatsForNonNativeTable(Table tab) throws SemanticException { this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); } + private ImmutableBitSet getEnabledNotNullConstraints(Table tbl) throws HiveException{ + List nullConstraints = new ArrayList<>(); + final NotNullConstraint nnc = Hive.get().getEnabledNotNullConstraints( + tbl.getDbName(), tbl.getTableName()); + ImmutableBitSet bitSet = null; + if(nnc == null || nnc.getNotNullConstraints().isEmpty()) { + return bitSet; + } + // Build the bitset with not null columns + ImmutableBitSet.Builder builder = ImmutableBitSet.builder(); + for (String nnCol : nnc.getNotNullConstraints().values()) { + int nnPos = -1; + for (int i = 0; i < tbl.getCols().size(); i++) { + if (tbl.getCols().get(i).getName().equals(nnCol)) { + nnPos = i; + break; + } + } + if (nnPos == -1) { + LOG.error("Column for not null constraint definition " + nnCol + " not found"); + break; + } + builder.set(nnPos); + } + bitSet = builder.build(); + return bitSet; + } + + private Operator + genIsNotNullConstraint(String dest, QB qb, Operator input) + throws SemanticException { + + if(qb.getMetaData().getDestTypeForAlias(dest) == QBMetaData.DEST_TABLE) { + // if this is an insert into statement we might need to add constraint check + final Table targetTable = qb.getMetaData().getNameToDestTable().get(dest); + ImmutableBitSet nullConstraintBitSet = null; + try { + nullConstraintBitSet 
+  private Operator
+      genIsNotNullConstraint(String dest, QB qb, Operator input)
+      throws SemanticException {
+
+    if (qb.getMetaData().getDestTypeForAlias(dest) == QBMetaData.DEST_TABLE) {
+      // if this is an insert into statement we might need to add a constraint check
+      final Table targetTable = qb.getMetaData().getNameToDestTable().get(dest);
+      ImmutableBitSet nullConstraintBitSet = null;
+      try {
+        nullConstraintBitSet = getEnabledNotNullConstraints(targetTable);
+      } catch (Exception e) {
+        if (e instanceof SemanticException) {
+          throw (SemanticException) e;
+        } else {
+          throw new RuntimeException(e);
+        }
+      }
+      if (nullConstraintBitSet == null) {
+        return input;
+      }
+      assert (input.getType() == OperatorType.SELECT);
+      List<ExprNodeDesc> colExprs = ((SelectOperator) input).getConf().getColList();
+
+      ExprNodeDesc currUDF = null;
+      for (int i = 0; i < colExprs.size(); i++) {
+        if (nullConstraintBitSet.indexOf(i) != -1) {
+          ExprNodeDesc isNotNullUDF = TypeCheckProcFactory.DefaultExprProcessor.
+              getFuncExprNodeDesc("isnotnull", colExprs.get(i));
+          ExprNodeDesc constraintUDF = TypeCheckProcFactory.DefaultExprProcessor.
+              getFuncExprNodeDesc("enforce_constraint", isNotNullUDF);
+          if (currUDF != null) {
+            currUDF = TypeCheckProcFactory.DefaultExprProcessor.
+                getFuncExprNodeDesc("and", currUDF, constraintUDF);
+          } else {
+            currUDF = constraintUDF;
+          }
+        }
+      }
+      if (currUDF != null) {
+        assert (input.getParentOperators().size() == 1);
+        Operator childOperator = (Operator) input.getParentOperators().get(0);
+        childOperator.setChildOperators(null);
+        RowResolver inputRR = opParseCtx.get(childOperator).getRowResolver();
+        Operator newConstraintFilter = putOpInsertMap(OperatorFactory.getAndMakeChild(
+            new FilterDesc(currUDF, false), new RowSchema(
+                inputRR.getColumnInfos()), childOperator), inputRR);
+
+        input.setParentOperators(null);
+
+        List<Operator<? extends OperatorDesc>> childOperators = new ArrayList<>();
+        childOperators.add(input);
+        newConstraintFilter.setChildOperators(childOperators);
+
+        List<Operator<? extends OperatorDesc>> parentOperators = new ArrayList<>();
+        parentOperators.add(newConstraintFilter);
+        input.setParentOperators(parentOperators);
+      }
+    }
+    return input;
+  }
 
   @SuppressWarnings("nls")
   protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
       throws SemanticException {
 
+    boolean forceNotNullConstraint = conf.getBoolVar(ConfVars.HIVE_ENFORCE_NOT_NULL_CONSTRAINT);
+    if (forceNotNullConstraint) {
+      genIsNotNullConstraint(dest, qb, input);
+    }
+
     RowResolver inputRR = opParseCtx.get(input).getRowResolver();
     QBMetaData qbm = qb.getMetaData();
     Integer dest_type = qbm.getDestTypeForAlias(dest);
@@ -11310,6 +11408,68 @@ Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticExcept
     return genPlan(qb);
   }
 
+  private List<Boolean> getConstraintsForcedForTable(final Table tbl) {
+    List<FieldSchema> allCols = tbl.getCols();
+    List<Boolean> constr = new ArrayList<>();
+    for (FieldSchema fs : allCols) {
+      constr.add(true);
+    }
+    return constr;
+  }
+
+  // this method will rewrite the incoming AST to enforce constraints
+  // (for now only NOT NULL)
+  /* private void enforceConstraint(final QB qb, ASTNode query) {
+    QBParseInfo parseInfo = qb.getParseInfo();
+    QBMetaData qbMetaData = qb.getMetaData();
+    //TODO: if and only if it is insert query
+
+    // get all the insert clauses
+    Set<String> allInClauses = parseInfo.getClauseNames();
+    for (String currClause : allInClauses) {
+      Table targetTable = qbMetaData.getNameToDestTable().get(currClause);
+      List<Boolean> constr = getConstraintsForcedForTable(targetTable);
+
+      ASTNode selectC = parseInfo.getSelForClause(currClause);
+      List<Node> selExprs = selectC.getChildren();
+
+      assert (selExprs.size() == constr.size());
+
+      ASTNode newNode = null;
+
+      for (int i = 0; i < selExprs.size(); i++) {
+        // ...
+      }
+    }
+  } */
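
Between the planner changes above and the new UDF below, the flow is: compute the bitset of NOT NULL column ordinals, AND together one enforce_constraint(col is not null) check per flagged column, and splice a filter between the SELECT and its parent. A self-contained sketch of that wiring (plain Java; Node and the helper names are hypothetical stand-ins for Hive's Operator/ExprNodeDesc, and java.util.BitSet stands in for Calcite's ImmutableBitSet):

    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;

    // Toy model of genIsNotNullConstraint: a bitset marks NOT NULL column
    // positions; one predicate per flagged column is AND-ed together and
    // pushed into a filter node spliced between `input` and its parent.
    public class ConstraintPlanSketch {
      static final class Node {
        final String label;
        final List<Node> parents = new ArrayList<>();
        final List<Node> children = new ArrayList<>();
        Node(String label) { this.label = label; }
      }

      static String buildPredicate(List<String> cols, BitSet notNull) {
        String pred = null;
        for (int i = notNull.nextSetBit(0); i >= 0; i = notNull.nextSetBit(i + 1)) {
          String check = "enforce_constraint(" + cols.get(i) + " is not null)";
          pred = (pred == null) ? check : "(" + pred + " and " + check + ")";
        }
        return pred;
      }

      // Detach `input` from its single parent and re-attach it under `filter`.
      static void spliceAbove(Node input, Node filter) {
        Node parent = input.parents.get(0);
        parent.children.remove(input);
        parent.children.add(filter);
        filter.parents.add(parent);
        filter.children.add(input);
        input.parents.clear();
        input.parents.add(filter);
      }

      public static void main(String[] args) {
        BitSet notNull = new BitSet();
        notNull.set(0);
        notNull.set(2); // columns a and c are NOT NULL ENFORCED
        String pred = buildPredicate(List.of("a", "b", "c"), notNull);

        Node scan = new Node("TS[src]");
        Node select = new Node("SEL");
        scan.children.add(select);
        select.parents.add(scan);
        spliceAbove(select, new Node("FIL[" + pred + "]"));

        System.out.println(scan.children.get(0).label);
        // FIL[(enforce_constraint(a is not null) and enforce_constraint(c is not null))]
      }
    }
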
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceNotNullConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceNotNullConstraint.java
new file mode 100644
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceNotNullConstraint.java
@@ -0,0 +1,54 @@
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+@Description(name = "enforce_constraint",
+    value = "_FUNC_(x) - Internal UDF to enforce NOT NULL constraints")
+public class GenericUDFEnforceNotNullConstraint extends GenericUDF {
+
+  private final BooleanWritable resultBool = new BooleanWritable();
+  private transient BooleanObjectInspector boi;
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length > 1) {
+      throw new UDFArgumentLengthException(
+          "Invalid number of arguments. enforce_constraint UDF expected one argument but received: "
+              + arguments.length);
+    }
+
+    boi = (BooleanObjectInspector) arguments[0];
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+
+    Object a = arguments[0].get();
+    boolean result = boi.get(a);
+
+    if (!result) {
+      throw new UDFArgumentLengthException("NOT NULL constraint violated!");
+    }
+    resultBool.set(true);
+    return resultBool;
+  }
+
+  @Override
+  protected String getFuncName() {
+    return "enforce_constraint";
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return getStandardDisplayString(getFuncName(), children);
+  }
+
+}
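
The UDF's contract is minimal: return true when its boolean argument (an IS NOT NULL check wrapped by the planner) is true, and abort the query otherwise. A self-contained sketch of that behavior (plain Java, hypothetical names; the real UDF throws a HiveException subtype inside the filter operator):

    // Toy model of GenericUDFEnforceNotNullConstraint.evaluate: pass through
    // on true, fail the whole computation on false.
    public class EnforceConstraintSketch {
      static boolean enforceConstraint(boolean predicate) {
        if (!predicate) {
          throw new IllegalStateException("NOT NULL constraint violated!");
        }
        return true;
      }

      public static void main(String[] args) {
        String a = "not"; // column with NOT NULL ENFORCED
        String c = null;  // violating value
        System.out.println(enforceConstraint(a != null)); // true
        try {
          enforceConstraint(c != null);                   // throws
        } catch (IllegalStateException e) {
          System.out.println(e.getMessage());
        }
      }
    }
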
diff --git a/ql/src/test/queries/clientnegative/insert_into_notnull_constraint.q b/ql/src/test/queries/clientnegative/insert_into_notnull_constraint.q
new file mode 100644
index 0000000000..da35b02cd3
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/insert_into_notnull_constraint.q
@@ -0,0 +1,3 @@
+create table nullConstraintCheck(i int NOT NULL enforced, j int);
+insert into nullConstraintCheck values(null,2);
+
diff --git a/ql/src/test/queries/clientnegative/insert_overwrite_notnull_constraint.q b/ql/src/test/queries/clientnegative/insert_overwrite_notnull_constraint.q
new file mode 100644
index 0000000000..64fe4e4ca5
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/insert_overwrite_notnull_constraint.q
@@ -0,0 +1,3 @@
+create table nullConstraintCheck(i int NOT NULL enforced, j int);
+insert overwrite table nullConstraintCheck values(null,2);
+
diff --git a/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
new file mode 100644
index 0000000000..28257c6e1c
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
@@ -0,0 +1,28 @@
+-- create table with first and last column with not null
+CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED);
+
+-- insert value tuples
+explain INSERT INTO table1 values('not', 'null', 'constraint');
+
+-- insert from select
+explain INSERT INTO table1 select key, src.value, value from src;
+
+-- insert overwrite
+explain INSERT OVERWRITE TABLE table1 select src.*, value from src;
+
+-- insert overwrite with if not exists
+explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src;
+
+-- multi insert
+create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING);
+create table src_multi2 (i STRING, j STRING NOT NULL DISABLE);
+
+explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20;
+
+explain
+from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20;
diff --git a/ql/src/test/results/clientnegative/insert_into_notnull_constraint.q.out b/ql/src/test/results/clientnegative/insert_into_notnull_constraint.q.out
new file mode 100644
index 0000000000..6718ae0d1d
--- /dev/null
+++ b/ql/src/test/results/clientnegative/insert_into_notnull_constraint.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nullConstraintCheck
+POSTHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nullConstraintCheck
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!
diff --git a/ql/src/test/results/clientnegative/insert_overwrite_notnull_constraint.q.out b/ql/src/test/results/clientnegative/insert_overwrite_notnull_constraint.q.out
new file mode 100644
index 0000000000..6718ae0d1d
--- /dev/null
+++ b/ql/src/test/results/clientnegative/insert_overwrite_notnull_constraint.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nullConstraintCheck
+POSTHOOK: query: create table nullConstraintCheck(i int NOT NULL enforced, j int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nullConstraintCheck
+FAILED: UDFArgumentException org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException: NOT NULL constraint violated!
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
new file mode 100644
index 0000000000..71c15b31a4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -0,0 +1,712 @@
+PREHOOK: query: CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table1
+POSTHOOK: query: CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table1
+PREHOOK: query: explain INSERT INTO table1 values('not', 'null', 'constraint')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT INTO table1 values('not', 'null', 'constraint')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: array(const struct('not','null','constraint')) (type: array<struct<col1:string,col2:string,col3:string>>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: inline
+                      Filter Operator
+                        predicate: (enforce_constraint(col1 is not null) and enforce_constraint(col3 is not null)) (type: boolean)
+                        Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                        Select Operator
+                          expressions: col1 (type: string), col2 (type: string), col3 (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.table1
+                          Select Operator
+                            expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                            outputColumnNames: a, b, c
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            Group By Operator
+                              aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                              mode: hash
+                              outputColumnNames: _col0, _col1, _col2
+                              Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                              Reduce Output Operator
+                                sort order: 
+                                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                                value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: explain INSERT INTO table1 select key, src.value, value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT INTO table1 select key, src.value, value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (enforce_constraint(key is not null) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.table1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                        outputColumnNames: a, b, c
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: explain INSERT OVERWRITE TABLE table1 select src.*, value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT OVERWRITE TABLE table1 select src.*, value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (enforce_constraint(key is not null) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.table1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                        outputColumnNames: a, b, c
+                        Statistics: Num rows: 125 Data size: 33625 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (enforce_constraint(key is not null) and enforce_constraint(value is not null)) (type: boolean)
+                    Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 125 Data size: 33125 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 125 Data size: 33125 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.table1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                        outputColumnNames: a, b, c
+                        Statistics: Num rows: 125 Data size: 33125 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.table1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b, c
+          Column Types: string, string, string
+          Table: default.table1
+
+PREHOOK: query: create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: create table src_multi2 (i STRING, j STRING NOT NULL DISABLE)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 (i STRING, j STRING NOT NULL DISABLE)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((key < 10) and enforce_constraint(key is not null)) (type: boolean)
+                    Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: a, b
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                  Filter Operator
+                    predicate: ((key < 20) and (key > 10)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: i, j
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(i, 'hll'), compute_stats(j, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 3
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b
+          Column Types: string, string
+          Table: default.src_multi1
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, j
+          Column Types: string, string
+          Table: default.src_multi2
+
+PREHOOK: query: explain
+from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert into table src_multi1 select * where src.key < 10
+insert into table src_multi2 select src.* where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((key < 10) and enforce_constraint(key is not null)) (type: boolean)
+                    Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: a, b
+                        Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                  Filter Operator
+                    predicate: ((key < 20) and (key > 10)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: i, j
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(i, 'hll'), compute_stats(j, 'hll')
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 3
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b
+          Column Types: string, string
+          Table: default.src_multi1
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: i, j
+          Column Types: string, string
+          Table: default.src_multi2
+