diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index 125ad19278..4d2eacb498 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -599,6 +599,7 @@ minillaplocal.query.files=\ orc_ppd_decimal.q,\ orc_ppd_timestamp.q,\ order_null.q,\ + partition_ctas.q,\ partition_multilevels.q,\ partition_shared_scan.q,\ partition_pruning.q,\ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 397cee2a5f..939ef360c2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -48,16 +48,15 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; +import java.util.concurrent.ExecutionException; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.concurrent.ExecutionException; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; - import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -289,8 +288,6 @@ import org.slf4j.LoggerFactory; import org.stringtemplate.v4.ST; -import static org.apache.commons.lang.StringUtils.join; - /** * DDLTask implementation. 
* diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index a1f5133c86..d2c04e22de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -322,6 +322,7 @@ public int execute(DriverContext driverContext) { } } } + // Multi-file load is for dynamic partitions when some partitions do not // need to merge and they can simply be moved to the target directory. // This is also used for MM table conversion. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index fa92385378..d887124c91 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1489,10 +1489,15 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, boolean truncate = false; if (mvWork.getLoadTableWork() != null) { statsWork = new BasicStatsWork(mvWork.getLoadTableWork()); - String tableName = mvWork.getLoadTableWork().getTable().getTableName(); truncate = mvWork.getLoadTableWork().getReplace(); + String tableName = mvWork.getLoadTableWork().getTable().getTableName(); try { - table = Hive.get().getTable(SessionState.get().getCurrentDatabase(), tableName); + // For partitioned CTAS, the table has not been created, but we can retrieve it + // from the loadTableWork. For rest of query types, we just retrieve it from + // metastore. + table = mvWork.getLoadTableWork().getMdTable() != null ? 
+ mvWork.getLoadTableWork().getMdTable() : + Hive.get().getTable(SessionState.get().getCurrentDatabase(), tableName); } catch (HiveException e) { throw new RuntimeException("unexpected; table should be present already..: " + tableName, e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 6be48ca23b..49f5487f40 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -215,6 +215,7 @@ TOK_TABCOLLIST; TOK_TABCOL; TOK_TABLECOMMENT; TOK_TABLEPARTCOLS; +TOK_TABLEPARTCOLNAMES; TOK_TABLEROWFORMAT; TOK_TABLEROWFORMATFIELD; TOK_TABLEROWFORMATCOLLITEMS; @@ -1088,7 +1089,7 @@ createTableStatement tablePropertiesPrefixed? | (LPAREN columnNameTypeOrConstraintList RPAREN)? tableComment? - tablePartition? + createTablePartitionSpec? tableBuckets? tableSkewed? tableRowFormat? @@ -1101,7 +1102,7 @@ createTableStatement ^(TOK_LIKETABLE $likeName?) columnNameTypeOrConstraintList? tableComment? - tablePartition? + createTablePartitionSpec? tableBuckets? tableSkewed? tableRowFormat? @@ -1987,13 +1988,28 @@ tableComment KW_COMMENT comment=StringLiteral -> ^(TOK_TABLECOMMENT $comment) ; -tablePartition -@init { pushMsg("table partition specification", state); } +createTablePartitionSpec +@init { pushMsg("create table partition specification", state); } @after { popMsg(state); } - : KW_PARTITIONED KW_BY LPAREN columnNameTypeConstraint (COMMA columnNameTypeConstraint)* RPAREN + : KW_PARTITIONED KW_BY LPAREN (opt1 = createTablePartitionColumnTypeSpec | opt2 = createTablePartitionColumnSpec) RPAREN + -> {$opt1.tree != null}? 
$opt1 + -> $opt2 + ; + +createTablePartitionColumnTypeSpec +@init { pushMsg("create table partition specification", state); } +@after { popMsg(state); } + : columnNameTypeConstraint (COMMA columnNameTypeConstraint)* -> ^(TOK_TABLEPARTCOLS columnNameTypeConstraint+) ; +createTablePartitionColumnSpec +@init { pushMsg("create table partition specification", state); } +@after { popMsg(state); } + : columnName (COMMA columnName)* + -> ^(TOK_TABLEPARTCOLNAMES columnName+) + ; + tableBuckets @init { pushMsg("table buckets specification", state); } @after { popMsg(state); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index b5adf1bd04..b28cf98029 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -38,6 +38,8 @@ import java.util.Optional; import java.util.Queue; import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; import java.util.TreeSet; import java.util.function.Supplier; import java.util.regex.Pattern; @@ -55,8 +57,9 @@ import org.antlr.runtime.tree.TreeWizard.ContextVisitor; import org.apache.calcite.rel.RelNode; import org.apache.calcite.util.ImmutableBitSet; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; -import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -6953,7 +6956,7 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb, } private void genPartnCols(String dest, Operator input, QB qb, - TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException { + TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException { boolean 
enforceBucketing = false; ArrayList partnColsNoConvert = new ArrayList(); @@ -6986,13 +6989,13 @@ private void genPartnCols(String dest, Operator input, QB qb, } @SuppressWarnings("unchecked") - private void setStatsForNonNativeTable(Table tab) throws SemanticException { - String tableName = DDLSemanticAnalyzer.getDotName(new String[] { tab.getDbName(), - tab.getTableName() }); + private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException { + String qTableName = DDLSemanticAnalyzer.getDotName(new String[] { dbName, + tableName }); AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, null, false); HashMap mapProp = new HashMap<>(); mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null); - alterTblDesc.setOldName(tableName); + alterTblDesc.setOldName(qTableName); alterTblDesc.setProps(mapProp); alterTblDesc.setDropIfExists(true); this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); @@ -7227,21 +7230,22 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) RowResolver inputRR = opParseCtx.get(input).getRowResolver(); QBMetaData qbm = qb.getMetaData(); - Integer dest_type = qbm.getDestTypeForAlias(dest); + Integer destType = qbm.getDestTypeForAlias(dest); - Table dest_tab = null; // destination table if any + Table destinationTable = null; // destination table if any boolean destTableIsTransactional; // true for full ACID table and MM table boolean destTableIsFullAcid; // should the destination table be written to using ACID boolean destTableIsTemporary = false; boolean destTableIsMaterialization = false; - Partition dest_part = null;// destination partition if any + Partition destinationPartition = null;// destination partition if any Path queryTmpdir = null; // the intermediate destination directory - Path dest_path = null; // the final destination directory - TableDesc table_desc = null; + Path destinationPath = null; // the final destination 
directory + TableDesc tableDescriptor = null; int currentTableId = 0; boolean isLocal = false; SortBucketRSCtx rsCtx = new SortBucketRSCtx(); DynamicPartitionCtx dpCtx = null; + Table partitionedCTASOrMVTable = null; // destination partitioned CTAS or MV table if any LoadTableDesc ltd = null; ListBucketingCtx lbCtx = null; Map partSpec = null; @@ -7249,24 +7253,24 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) Long writeId = null; HiveTxnManager txnMgr = getTxnMgr(); - switch (dest_type.intValue()) { + switch (destType.intValue()) { case QBMetaData.DEST_TABLE: { - dest_tab = qbm.getDestTableForAlias(dest); - destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab); - destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab); - destTableIsTemporary = dest_tab.isTemporary(); + destinationTable = qbm.getDestTableForAlias(dest); + destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable); + destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable); + destTableIsTemporary = destinationTable.isTemporary(); // Is the user trying to insert into a external tables - checkExternalTable(dest_tab); + checkExternalTable(destinationTable); partSpec = qbm.getPartSpecForAlias(dest); - dest_path = dest_tab.getPath(); + destinationPath = destinationTable.getPath(); - checkImmutableTable(qb, dest_tab, dest_path, false); + checkImmutableTable(qb, destinationTable, destinationPath, false); // check for partition - List parts = dest_tab.getPartitionKeys(); + List parts = destinationTable.getPartitionKeys(); if (parts != null && parts.size() > 0) { // table is partitioned if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition throw new SemanticException(generateErrorMessage( @@ -7275,8 +7279,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } dpCtx = qbm.getDPCtx(dest); if (dpCtx == null) { - dest_tab.validatePartColumnNames(partSpec, false); - dpCtx = new 
DynamicPartitionCtx(dest_tab, partSpec, + destinationTable.validatePartColumnNames(partSpec, false); + dpCtx = new DynamicPartitionCtx(partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); qbm.setDPCtx(dest, dpCtx); @@ -7284,76 +7288,76 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } // Check for dynamic partitions. - dpCtx = checkDynPart(qb, qbm, dest_tab, partSpec, dest); + dpCtx = checkDynPart(qb, qbm, destinationTable, partSpec, dest); if (dpCtx != null && dpCtx.getSPPath() != null) { - dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath()); + destinationPath = new Path(destinationTable.getPath(), dpCtx.getSPPath()); } - boolean isNonNativeTable = dest_tab.isNonNative(); - isMmTable = AcidUtils.isInsertOnlyTable(dest_tab.getParameters()); + boolean isNonNativeTable = destinationTable.isNonNative(); + isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters()); if (isNonNativeTable || isMmTable) { - queryTmpdir = dest_path; + queryTmpdir = destinationPath; } else { - queryTmpdir = ctx.getTempDirForFinalJobPath(dest_path); + queryTmpdir = ctx.getTempDirForFinalJobPath(destinationPath); } if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_TABLE specifying " + queryTmpdir - + " from " + dest_path); + + " from " + destinationPath); } if (dpCtx != null) { // set the root of the temporary path where dynamic partition columns will populate dpCtx.setRootPath(queryTmpdir); } // this table_desc does not contain the partitioning columns - table_desc = Utilities.getTableDesc(dest_tab); + tableDescriptor = Utilities.getTableDesc(destinationTable); // Add NOT NULL constraint check input = genConstraintsPlan(dest, qb, input); // Add sorting/bucketing if needed - input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx); + input = genBucketingSortingDest(dest, input, qb, 
tableDescriptor, destinationTable, rsCtx); - idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName()); + idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName()); currentTableId = destTableId; destTableId++; - lbCtx = constructListBucketingCtx(dest_tab.getSkewedColNames(), - dest_tab.getSkewedColValues(), dest_tab.getSkewedColValueLocationMaps(), - dest_tab.isStoredAsSubDirectories(), conf); + lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(), + destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(), + destinationTable.isStoredAsSubDirectories(), conf); // Create the work for moving the table // NOTE: specify Dynamic partitions in dest_tab for WriteEntity if (!isNonNativeTable) { AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsFullAcid) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); + acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest); //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM - checkAcidConstraints(qb, table_desc, dest_tab); + checkAcidConstraints(qb, tableDescriptor, destinationTable); } try { if (ctx.getExplainConfig() != null) { writeId = null; // For explain plan, txn won't be opened and doesn't make sense to allocate write id } else { if (isMmTable) { - writeId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName()); } else { writeId = acidOp == Operation.NOT_ACID ? 
null : - txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName()); } } } catch (LockException ex) { throw new SemanticException("Failed to allocate write Id", ex); } boolean isReplace = !qb.getParseInfo().isInsertIntoTable( - dest_tab.getDbName(), dest_tab.getTableName()); - ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, writeId); + destinationTable.getDbName(), destinationTable.getTableName()); + ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, isReplace, writeId); if (writeId != null) { ltd.setStmtId(txnMgr.getCurrentStmtId()); } // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old // deltas and base and leave them up to the cleaner to clean up boolean isInsertInto = qb.getParseInfo().isInsertIntoTable( - dest_tab.getDbName(), dest_tab.getTableName()); + destinationTable.getDbName(), destinationTable.getTableName()); LoadFileType loadType = (!isInsertInto && !destTableIsTransactional) ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING; ltd.setLoadFileType(loadType); @@ -7363,88 +7367,88 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } else { // This is a non-native table. // We need to set stats as inaccurate. - setStatsForNonNativeTable(dest_tab); + setStatsForNonNativeTable(destinationTable.getDbName(), destinationTable.getTableName()); // true if it is insert overwrite. boolean overwrite = !qb.getParseInfo().isInsertIntoTable( - String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName())); - createPreInsertDesc(dest_tab, overwrite); + String.format("%s.%s", destinationTable.getDbName(), destinationTable.getTableName())); + createPreInsertDesc(destinationTable, overwrite); - ltd = new LoadTableDesc(queryTmpdir, table_desc, partSpec == null ? 
ImmutableMap.of() : partSpec); + ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, partSpec == null ? ImmutableMap.of() : partSpec); ltd.setInsertOverwrite(overwrite); ltd.setLoadFileType(overwrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING); } - if (dest_tab.isMaterializedView()) { + if (destinationTable.isMaterializedView()) { materializedViewUpdateDesc = new MaterializedViewDesc( - dest_tab.getFullyQualifiedName(), false, false, true); + destinationTable.getFullyQualifiedName(), false, false, true); } WriteEntity output = generateTableWriteEntity( - dest, dest_tab, partSpec, ltd, dpCtx, isNonNativeTable); + dest, destinationTable, partSpec, ltd, dpCtx, isNonNativeTable); ctx.getLoadTableOutputMap().put(ltd, output); break; } case QBMetaData.DEST_PARTITION: { - dest_part = qbm.getDestPartitionForAlias(dest); - dest_tab = dest_part.getTable(); - destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab); - destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab); + destinationPartition = qbm.getDestPartitionForAlias(dest); + destinationTable = destinationPartition.getTable(); + destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable); + destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable); - checkExternalTable(dest_tab); + checkExternalTable(destinationTable); - Path tabPath = dest_tab.getPath(); - Path partPath = dest_part.getDataLocation(); + Path tabPath = destinationTable.getPath(); + Path partPath = destinationPartition.getDataLocation(); - checkImmutableTable(qb, dest_tab, partPath, true); + checkImmutableTable(qb, destinationTable, partPath, true); // if the table is in a different dfs than the partition, // replace the partition's dfs with the table's dfs. 
- dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri() + destinationPath = new Path(tabPath.toUri().getScheme(), tabPath.toUri() .getAuthority(), partPath.toUri().getPath()); - isMmTable = AcidUtils.isInsertOnlyTable(dest_tab.getParameters()); - queryTmpdir = isMmTable ? dest_path : ctx.getTempDirForFinalJobPath(dest_path); + isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters()); + queryTmpdir = isMmTable ? destinationPath : ctx.getTempDirForFinalJobPath(destinationPath); if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying " - + queryTmpdir + " from " + dest_path); + + queryTmpdir + " from " + destinationPath); } - table_desc = Utilities.getTableDesc(dest_tab); + tableDescriptor = Utilities.getTableDesc(destinationTable); // Add NOT NULL constraint check input = genConstraintsPlan(dest, qb, input); // Add sorting/bucketing if needed - input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx); + input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx); - idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName()); + idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName()); currentTableId = destTableId; destTableId++; - lbCtx = constructListBucketingCtx(dest_part.getSkewedColNames(), - dest_part.getSkewedColValues(), dest_part.getSkewedColValueLocationMaps(), - dest_part.isStoredAsSubDirectories(), conf); + lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(), + destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(), + destinationPartition.isStoredAsSubDirectories(), conf); AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsFullAcid) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); + acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest); //todo: 
should this be done for MM? is it ok to use CombineHiveInputFormat with MM? - checkAcidConstraints(qb, table_desc, dest_tab); + checkAcidConstraints(qb, tableDescriptor, destinationTable); } try { if (ctx.getExplainConfig() != null) { writeId = 0L; // For explain plan, txn won't be opened and doesn't make sense to allocate write id } else { if (isMmTable) { - writeId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName()); } else { writeId = (acidOp == Operation.NOT_ACID) ? null : - txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName()); } } } catch (LockException ex) { throw new SemanticException("Failed to allocate write Id", ex); } - ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, writeId); + ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, destinationPartition.getSpec(), acidOp, writeId); if (writeId != null) { ltd.setStmtId(txnMgr.getCurrentStmtId()); } @@ -7460,11 +7464,11 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) ltd.setLbCtx(lbCtx); loadTableWork.add(ltd); - if (!outputs.add(new WriteEntity(dest_part, - determineWriteType(ltd, dest_tab.isNonNative(), dest)))) { + if (!outputs.add(new WriteEntity(destinationPartition, + determineWriteType(ltd, destinationTable.isNonNative(), dest)))) { throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES - .getMsg(dest_tab.getTableName() + "@" + dest_part.getName())); + .getMsg(destinationTable.getTableName() + "@" + destinationPartition.getName())); } break; } @@ -7472,18 +7476,21 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) isLocal = true; // fall through case QBMetaData.DEST_DFS_FILE: { - dest_path = new Path(qbm.getDestFileForAlias(dest)); - - ArrayList colInfos = inputRR.getColumnInfos(); + 
destinationPath = new Path(qbm.getDestFileForAlias(dest)); // CTAS case: the file output format and serde are defined by the create // table command rather than taking the default value - List field_schemas = null; + List fieldSchemas = null; + List partitionColumns = null; + List partitionColumnNames = null; + List fileSinkColInfos = null; CreateTableDesc tblDesc = qb.getTableDesc(); CreateViewDesc viewDesc = qb.getViewDesc(); - boolean isCtas = false; if (tblDesc != null) { - field_schemas = new ArrayList(); + fieldSchemas = new ArrayList<>(); + partitionColumns = new ArrayList<>(); + partitionColumnNames = tblDesc.getPartColNames(); + fileSinkColInfos = new ArrayList<>(); destTableIsTemporary = tblDesc.isTemporary(); destTableIsMaterialization = tblDesc.isMaterialization(); if (AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) { @@ -7500,7 +7507,10 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) tblDesc.setInitialMmWriteId(writeId); } } else if (viewDesc != null) { - field_schemas = new ArrayList(); + fieldSchemas = new ArrayList<>(); + partitionColumns = new ArrayList<>(); + partitionColumnNames = viewDesc.getPartColNames(); + fileSinkColInfos = new ArrayList<>(); destTableIsTemporary = false; } @@ -7513,55 +7523,73 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // otherwise write to the file system implied by the directory // no copy is required. we may want to revisit this policy in future try { - Path qPath = FileUtils.makeQualified(dest_path, conf); + Path qPath = FileUtils.makeQualified(destinationPath, conf); queryTmpdir = isMmTable ? 
qPath : ctx.getTempDirForFinalJobPath(qPath); if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir - + " from " + dest_path + " (" + isMmTable + ")"); + + " from " + destinationPath + " (" + isMmTable + ")"); } } catch (Exception e) { throw new SemanticException("Error creating temporary folder on: " - + dest_path, e); + + destinationPath, e); } } - ColsAndTypes ct = deriveFileSinkColTypes(inputRR, field_schemas); - String cols = ct.cols, colTypes = ct.colTypes; + // Check for dynamic partitions. + final String cols, colTypes; + final boolean isPartitioned; + if (dpCtx != null) { + throw new SemanticException("Dynamic partition context has already been created, this should not happen"); + } + if (!CollectionUtils.isEmpty(partitionColumnNames)) { + ColsAndTypes ct = deriveFileSinkColTypes( + inputRR, partitionColumnNames, fieldSchemas, partitionColumns, + fileSinkColInfos); + cols = ct.cols; + colTypes = ct.colTypes; + dpCtx = new DynamicPartitionCtx(partitionColumnNames, + conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + qbm.setDPCtx(dest, dpCtx); + // set the root of the temporary path where dynamic partition columns will populate + dpCtx.setRootPath(queryTmpdir); + isPartitioned = true; + } else { + ColsAndTypes ct = deriveFileSinkColTypes(inputRR, fieldSchemas); + cols = ct.cols; + colTypes = ct.colTypes; + isPartitioned = false; + } // update the create table descriptor with the resulting schema. 
if (tblDesc != null) { - tblDesc.setCols(new ArrayList(field_schemas)); + tblDesc.setCols(new ArrayList<>(fieldSchemas)); + tblDesc.setPartCols(new ArrayList<>(partitionColumns)); } else if (viewDesc != null) { - viewDesc.setSchema(new ArrayList(field_schemas)); + viewDesc.setSchema(new ArrayList<>(fieldSchemas)); + viewDesc.setPartCols(new ArrayList<>(partitionColumns)); } destTableIsTransactional = tblDesc != null && AcidUtils.isTransactionalTable(tblDesc); destTableIsFullAcid = tblDesc != null && AcidUtils.isFullAcidTable(tblDesc); boolean isDestTempFile = true; - if (!ctx.isMRTmpFileURI(dest_path.toUri().toString())) { - idToTableNameMap.put(String.valueOf(destTableId), dest_path.toUri().toString()); + if (!ctx.isMRTmpFileURI(destinationPath.toUri().toString())) { + idToTableNameMap.put(String.valueOf(destTableId), destinationPath.toUri().toString()); currentTableId = destTableId; destTableId++; isDestTempFile = false; } - boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE); - // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats. 
- loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, dest_path, isDfsDir, cols, - colTypes, - destTableIsFullAcid ?//there is a change here - prev version had 'transadtional', one beofre' acid' - Operation.INSERT : Operation.NOT_ACID, - isMmCtas)); if (tblDesc == null) { if (viewDesc != null) { - table_desc = PlanUtils.getTableDesc(viewDesc, cols, colTypes); + tableDescriptor = PlanUtils.getTableDesc(viewDesc, cols, colTypes); } else if (qb.getIsQuery()) { String fileFormat; if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) { fileFormat = "SequenceFile"; HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT, fileFormat); - table_desc= + tableDescriptor = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, ThriftJDBCBinarySerDe.class); // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll @@ -7578,29 +7606,97 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) serdeClass = LazyBinarySerDe2.class; } } - table_desc = + tableDescriptor = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass); } } else { - table_desc = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes); + tableDescriptor = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes); } } else { - table_desc = PlanUtils.getTableDesc(tblDesc, cols, colTypes); + tableDescriptor = PlanUtils.getTableDesc(tblDesc, cols, colTypes); } - if (!outputs.add(new WriteEntity(dest_path, !isDfsDir, isDestTempFile))) { - throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES - .getMsg(dest_path.toUri().toString())); + boolean isDfsDir = (destType.intValue() == QBMetaData.DEST_DFS_FILE); + + if (isPartitioned) { + // Create a SELECT that may reorder the columns if needed + RowResolver rowResolver = new RowResolver(); + List columnExprs = new ArrayList<>(); + List colNames = new ArrayList<>(); + Map colExprMap = new HashMap<>(); + for (int i = 0; i < 
fileSinkColInfos.size(); i++) { + ColumnInfo ci = fileSinkColInfos.get(i); + ExprNodeDesc columnExpr = new ExprNodeColumnDesc(ci); + String name = getColumnInternalName(i); + rowResolver.put("", name, new ColumnInfo(name, columnExpr.getTypeInfo(), "", false)); + columnExprs.add(columnExpr); + colNames.add(name); + colExprMap.put(name, columnExpr); + } + input = putOpInsertMap(OperatorFactory.getAndMakeChild( + new SelectDesc(columnExprs, colNames), new RowSchema(rowResolver + .getColumnInfos()), input), rowResolver); + input.setColumnExprMap(colExprMap); + // If this is a partitioned CTAS or MV statement, we are going to create a LoadTableDesc + // object. Although the table does not exist in metastore, we will swap the CreateTableTask + // and MoveTask resulting from this LoadTable so in this specific case, first we create + // the metastore table, then we move and commit the partitions. At least for the time being, + // this order needs to be enforced because metastore expects a table to exist before we can + // add any partitions to it. + boolean isNonNativeTable = tableDescriptor.isNonNative(); + if (!isNonNativeTable) { + AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; + if (destTableIsFullAcid) { + acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest); + //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM + checkAcidConstraints(qb, tableDescriptor, null); + } + // isReplace = false in case concurrent operation is executed + ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, false, writeId); + if (writeId != null) { + ltd.setStmtId(txnMgr.getCurrentStmtId()); + } + ltd.setLoadFileType(LoadFileType.KEEP_EXISTING); + ltd.setInsertOverwrite(false); + loadTableWork.add(ltd); + } else { + // This is a non-native table. + // We need to set stats as inaccurate. 
+ setStatsForNonNativeTable(tableDescriptor.getDbName(), tableDescriptor.getTableName()); + ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx.getPartSpec()); + ltd.setInsertOverwrite(false); + ltd.setLoadFileType(LoadFileType.KEEP_EXISTING); + } + try { + partitionedCTASOrMVTable = tblDesc != null ? tblDesc.toTable(conf) : viewDesc.toTable(conf); + ltd.setMdTable(partitionedCTASOrMVTable); + WriteEntity output = generateTableWriteEntity( + dest, partitionedCTASOrMVTable, dpCtx.getPartSpec(), ltd, dpCtx, isNonNativeTable); + ctx.getLoadTableOutputMap().put(ltd, output); + } catch (HiveException e) { + throw new SemanticException(e); + } + } else { + // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats. + loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, destinationPath, isDfsDir, cols, + colTypes, + destTableIsFullAcid ?//there is a change here - prev version had 'transactional', one before 'acid' + Operation.INSERT : Operation.NOT_ACID, + isMmCtas)); + if (!outputs.add(new WriteEntity(destinationPath, !isDfsDir, isDestTempFile))) { + throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES + .getMsg(destinationPath.toUri().toString())); + } } break; } default: - throw new SemanticException("Unknown destination type: " + dest_type); + throw new SemanticException("Unknown destination type: " + destType); } - if (!(dest_type.intValue() == QBMetaData.DEST_DFS_FILE && qb.getIsQuery())) { - input = genConversionSelectOperator(dest, qb, input, table_desc, dpCtx); + if (!(destType.intValue() == QBMetaData.DEST_DFS_FILE && qb.getIsQuery())) { + input = genConversionSelectOperator(dest, qb, input, tableDescriptor, dpCtx); } inputRR = opParseCtx.get(input).getRowResolver(); @@ -7612,7 +7708,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) "", true)); } else { try { - StructObjectInspector rowObjectInspector = (StructObjectInspector) table_desc + StructObjectInspector 
rowObjectInspector = (StructObjectInspector) tableDescriptor .getDeserializer(conf).getObjectInspector(); List fields = rowObjectInspector .getAllStructFieldRefs(); @@ -7631,22 +7727,22 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // The output files of a FileSink can be merged if they are either not being written to a table // or are being written to a table which is not bucketed // and table the table is not sorted - boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 0) || - (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0))); + boolean canBeMerged = (destinationTable == null || !((destinationTable.getNumBuckets() > 0) || + (destinationTable.getSortCols() != null && destinationTable.getSortCols().size() > 0))); // If this table is working with ACID semantics, turn off merging canBeMerged &= !destTableIsFullAcid; // Generate the partition columns from the parent input - if (dest_type.intValue() == QBMetaData.DEST_TABLE - || dest_type.intValue() == QBMetaData.DEST_PARTITION) { - genPartnCols(dest, input, qb, table_desc, dest_tab, rsCtx); + if (destType.intValue() == QBMetaData.DEST_TABLE + || destType.intValue() == QBMetaData.DEST_PARTITION) { + genPartnCols(dest, input, qb, tableDescriptor, destinationTable, rsCtx); } - FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, table_desc, dest_part, - dest_path, currentTableId, destTableIsFullAcid, destTableIsTemporary,//this was 1/4 acid + FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, tableDescriptor, destinationPartition, + destinationPath, currentTableId, destTableIsFullAcid, destTableIsTemporary,//this was 1/4 acid destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, - canBeMerged, dest_tab, writeId, isMmCtas, dest_type, qb); + canBeMerged, destinationTable, writeId, isMmCtas, destType, qb); if (isMmCtas) { // Add FSD so that the LoadTask compilation could fix up its path to avoid the move. 
tableDesc.setWriter(fileSinkDesc); @@ -7657,7 +7753,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) ltd.setInsertOverwrite(true); } } - if (null != table_desc && useBatchingSerializer(table_desc.getSerdeClassName())) { + if (null != tableDescriptor && useBatchingSerializer(tableDescriptor.getSerdeClassName())) { fileSinkDesc.setIsUsingBatchingSerDe(true); } else { fileSinkDesc.setIsUsingBatchingSerDe(false); @@ -7670,26 +7766,24 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) if (LOG.isDebugEnabled()) { LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: " - + dest_path + " row schema: " + inputRR.toString()); + + destinationPath + " row schema: " + inputRR.toString()); } FileSinkOperator fso = (FileSinkOperator) output; - fso.getConf().setTable(dest_tab); + fso.getConf().setTable(destinationTable); // the following code is used to collect column stats when // hive.stats.autogather=true // and it is an insert overwrite or insert into table - if (dest_tab != null - && !dest_tab.isNonNative() - && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) + if (conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) { - if (dest_type.intValue() == QBMetaData.DEST_TABLE) { - genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo() - .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); - } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) { - genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb - .getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); - + // TODO: Column stats autogather does not work for CTAS statements + if (destType.intValue() == QBMetaData.DEST_TABLE && !destinationTable.isNonNative()) { + genAutoColumnStatsGatheringPipeline(qb, destinationTable, partSpec, input, qb.getParseInfo() + 
.isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName())); + } else if (destType.intValue() == QBMetaData.DEST_PARTITION && !destinationTable.isNonNative()) { + genAutoColumnStatsGatheringPipeline(qb, destinationTable, destinationPartition.getSpec(), input, qb + .getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName())); } } return output; @@ -7706,61 +7800,101 @@ private boolean hasSetBatchSerializer(String serdeClassName) { serdeClassName.equalsIgnoreCase(ArrowColumnarBatchSerDe.class.getName()); } + private ColsAndTypes deriveFileSinkColTypes(RowResolver inputRR, List field_schemas) + throws SemanticException { + return deriveFileSinkColTypes(inputRR, new ArrayList<>(), field_schemas, new ArrayList<>(), new ArrayList<>()); + } + private ColsAndTypes deriveFileSinkColTypes( - RowResolver inputRR, List field_schemas) throws SemanticException { + RowResolver inputRR, List partitionColumnNames, + List columns, List partitionColumns, + List fileSinkColInfos) throws SemanticException { ColsAndTypes result = new ColsAndTypes("", ""); - ArrayList colInfos = inputRR.getColumnInfos(); + List allColumns = new ArrayList<>(); + List colInfos = inputRR.getColumnInfos(); + List nonPartColInfos = new ArrayList<>(); + SortedMap> partColInfos = new TreeMap<>(); boolean first = true; - for (ColumnInfo colInfo : colInfos) { + int numNonPartitionedCols = colInfos.size() - partitionColumnNames.size(); + if (numNonPartitionedCols <= 0) { + throw new SemanticException("Too many partition columns declared"); + } + for (int i = 0; i < colInfos.size(); i++) { + ColumnInfo colInfo = colInfos.get(i); String[] nm = inputRR.reverseLookup(colInfo.getInternalName()); if (nm[1] != null) { // non-null column alias colInfo.setAlias(nm[1]); } + boolean isPartitionCol = false; String colName = colInfo.getInternalName(); //default column name - if (field_schemas != null) { + if (columns != null) { FieldSchema col = new FieldSchema(); if 
(!("".equals(nm[0])) && nm[1] != null) { colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove `` } colName = fixCtasColumnName(colName); col.setName(colName); + allColumns.add(colName); String typeName = colInfo.getType().getTypeName(); // CTAS should NOT create a VOID type if (typeName.equals(serdeConstants.VOID_TYPE_NAME)) { throw new SemanticException(ErrorMsg.CTAS_CREATES_VOID_TYPE.getMsg(colName)); } col.setType(typeName); - field_schemas.add(col); - } - - if (!first) { - result.cols = result.cols.concat(","); - result.colTypes = result.colTypes.concat(":"); - } - - first = false; - result.cols = result.cols.concat(colName); - - // Replace VOID type with string when the output is a temp table or - // local files. - // A VOID type can be generated under the query: - // - // select NULL from tt; - // or - // insert overwrite local directory "abc" select NULL from tt; - // - // where there is no column type to which the NULL value should be - // converted. - // - String tName = colInfo.getType().getTypeName(); - if (tName.equals(serdeConstants.VOID_TYPE_NAME)) { - result.colTypes = result.colTypes.concat(serdeConstants.STRING_TYPE_NAME); - } else { - result.colTypes = result.colTypes.concat(tName); + int idx = partitionColumnNames.indexOf(colName); + if (idx >= 0) { + partColInfos.put(idx, Pair.of(col, colInfo)); + isPartitionCol = true; + } else { + columns.add(col); + nonPartColInfos.add(colInfo); + } + } + + if (!isPartitionCol) { + if (!first) { + result.cols = result.cols.concat(","); + result.colTypes = result.colTypes.concat(":"); + } + + first = false; + result.cols = result.cols.concat(colName); + + // Replace VOID type with string when the output is a temp table or + // local files. + // A VOID type can be generated under the query: + // + // select NULL from tt; + // or + // insert overwrite local directory "abc" select NULL from tt; + // + // where there is no column type to which the NULL value should be + // converted. 
+ // + String tName = colInfo.getType().getTypeName(); + if (tName.equals(serdeConstants.VOID_TYPE_NAME)) { + result.colTypes = result.colTypes.concat(serdeConstants.STRING_TYPE_NAME); + } else { + result.colTypes = result.colTypes.concat(tName); + } } + + } + + if (partColInfos.size() != partitionColumnNames.size()) { + throw new SemanticException("Table declaration contains partition columns that are not present " + + "in query result schema. " + + "Query columns: " + allColumns + ". " + + "Partition columns: " + partitionColumnNames); } + + // FileSinkColInfos comprise nonPartCols followed by partCols + fileSinkColInfos.addAll(nonPartColInfos); + partitionColumns.addAll(partColInfos.values().stream().map(Pair::getLeft).collect(Collectors.toList())); + fileSinkColInfos.addAll(partColInfos.values().stream().map(Pair::getRight).collect(Collectors.toList())); + return result; } @@ -7964,7 +8098,7 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, DynamicPartitionCtx dpCtx = qbm.getDPCtx(dest); if (dpCtx == null) { dest_tab.validatePartColumnNames(partSpec, false); - dpCtx = new DynamicPartitionCtx(dest_tab, partSpec, + dpCtx = new DynamicPartitionCtx(partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); qbm.setDPCtx(dest, dpCtx); @@ -7988,16 +8122,9 @@ private void createPreInsertDesc(Table table, boolean overwrite) { } - private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc, + private void genAutoColumnStatsGatheringPipeline(QB qb, Table table, Map partSpec, Operator curr, boolean isInsertInto) throws SemanticException { - String tableName = table_desc.getTableName(); - Table table = null; - try { - table = db.getTable(tableName); - } catch (HiveException e) { - throw new SemanticException(e.getMessage()); - } - LOG.info("Generate an operator pipeline to autogather column stats for table " + tableName + LOG.info("Generate an operator 
pipeline to autogather column stats for table " + table.getTableName() + " in query " + ctx.getCmd()); ColumnStatsAutoGatherContext columnStatsAutoGatherContext = null; columnStatsAutoGatherContext = new ColumnStatsAutoGatherContext(this, conf, curr, table, partSpec, isInsertInto, ctx); @@ -13015,6 +13142,7 @@ ASTNode analyzeCreateTable( String likeTableName = null; List cols = new ArrayList(); List partCols = new ArrayList(); + List partColNames = new ArrayList<>(); List bucketCols = new ArrayList(); List primaryKeys = new ArrayList(); List foreignKeys = new ArrayList(); @@ -13130,6 +13258,9 @@ ASTNode analyzeCreateTable( "partition columns. ")); } break; + case HiveParser.TOK_TABLEPARTCOLNAMES: + partColNames = getColumnNames(child); + break; case HiveParser.TOK_ALTERTABLE_BUCKETS: bucketCols = getColumnNames((ASTNode) child.getChild(0)); if (child.getChildCount() == 2) { @@ -13235,6 +13366,10 @@ ASTNode analyzeCreateTable( switch (command_type) { case CREATE_TABLE: // REGULAR CREATE TABLE DDL + if (!CollectionUtils.isEmpty(partColNames)) { + throw new SemanticException( + "Partition columns can only declared using their name and types in regular CREATE TABLE statements"); + } tblProps = addDefaultProperties( tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary); addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); @@ -13338,12 +13473,16 @@ ASTNode analyzeCreateTable( } } + if (!CollectionUtils.isEmpty(partCols)) { + throw new SemanticException( + "Partition columns can only declared using their names in CTAS statements"); + } tblProps = addDefaultProperties( tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary); addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, - partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, + 
partColNames, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 7a2a2c7a28..005e7b6bb0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -21,6 +21,7 @@ import com.google.common.collect.Interner; import com.google.common.collect.Interners; +import org.apache.commons.collections.*; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.conf.HiveConf; @@ -32,6 +33,7 @@ import org.apache.hadoop.hive.ql.exec.DDLTask; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc; +import org.apache.hadoop.hive.ql.exec.MoveTask; import org.apache.hadoop.hive.ql.exec.StatsTask; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; @@ -327,13 +329,13 @@ public void compile(final ParseContext pCtx, crtTblDesc.validate(conf); Task crtTblTask = TaskFactory.get(new DDLWork( inputs, outputs, crtTblDesc)); - patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask); + patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames())); } else if (pCtx.getQueryProperties().isMaterializedView()) { // generate a DDL task and make it a dependent task of the leaf CreateViewDesc viewDesc = pCtx.getCreateViewDesc(); Task crtViewTask = TaskFactory.get(new DDLWork( inputs, outputs, viewDesc)); - patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask); + 
patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask, CollectionUtils.isEmpty(viewDesc.getPartColNames())); } else if (pCtx.getMaterializedViewUpdateDesc() != null) { // If there is a materialized view update desc, we create introduce it at the end // of the tree. @@ -458,9 +460,10 @@ private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticExce } } - private void patchUpAfterCTASorMaterializedView(final List> rootTasks, + private void patchUpAfterCTASorMaterializedView(final List> rootTasks, final HashSet outputs, - Task createTask) { + Task createTask, + boolean createTaskAfterMoveTask) { // clear the mapredWork output file from outputs for CTAS // DDLWork at the tail of the chain will have the output Iterator outIter = outputs.iterator(); @@ -479,18 +482,32 @@ private void patchUpAfterCTASorMaterializedView(final List> leaves = new LinkedHashSet<>(); getLeafTasks(rootTasks, leaves); assert (leaves.size() > 0); + // Target task is supposed to be the last task Task targetTask = createTask; for (Task task : leaves) { if (task instanceof StatsTask) { // StatsTask require table to already exist for (Task parentOfStatsTask : task.getParentTasks()) { - parentOfStatsTask.addDependentTask(createTask); + if (parentOfStatsTask instanceof MoveTask && !createTaskAfterMoveTask) { + // For partitioned CTAS, we need to create the table before the move task + // as we need to create the partitions in metastore and for that we should + // have already registered the table + interleaveTask(parentOfStatsTask, createTask); + } else { + parentOfStatsTask.addDependentTask(createTask); + } } for (Task parentOfCrtTblTask : createTask.getParentTasks()) { parentOfCrtTblTask.removeDependentTask(task); } createTask.addDependentTask(task); targetTask = task; + } else if (task instanceof MoveTask && !createTaskAfterMoveTask) { + // For partitioned CTAS, we need to create the table before the move task + // as we need to create the partitions in metastore and 
for that we should + // have already registered the table + interleaveTask(task, createTask); + targetTask = task; } else { task.addDependentTask(createTask); } @@ -522,6 +539,19 @@ private void patchUpAfterCTASorMaterializedView(final List dependentTask, Task task) { + for (Task parentOfStatsTask : dependentTask.getParentTasks()) { + parentOfStatsTask.addDependentTask(task); + } + for (Task parentOfCrtTblTask : task.getParentTasks()) { + parentOfCrtTblTask.removeDependentTask(dependentTask); + } + task.addDependentTask(dependentTask); + } + /** * A helper function to generate a column stats task on top of map-red task. The column stats * task fetches from the output of the map-red task, constructs the column stats object and diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index 871844b30d..0fadf1b61f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -72,6 +72,7 @@ boolean isExternal; List cols; List partCols; + List partColNames; List bucketCols; List sortCols; int numBuckets; @@ -137,29 +138,28 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal } public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, - List cols, List partCols, - List bucketCols, List sortCols, int numBuckets, - String fieldDelim, String fieldEscape, String collItemDelim, - String mapKeyDelim, String lineDelim, String comment, String inputFormat, - String outputFormat, String location, String serName, - String storageHandler, - Map serdeProps, - Map tblProps, - boolean ifNotExists, List skewedColNames, List> skewedColValues, - boolean isCTAS, List primaryKeys, List foreignKeys, - List uniqueConstraints, List notNullConstraints, - List defaultConstraints, List checkConstraints) { - this(databaseName, tableName, isExternal, 
isTemporary, cols, partCols, - bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, - collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, - outputFormat, location, serName, storageHandler, serdeProps, - tblProps, ifNotExists, skewedColNames, skewedColValues, - primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + List cols, List partColNames, + List bucketCols, List sortCols, int numBuckets, + String fieldDelim, String fieldEscape, String collItemDelim, + String mapKeyDelim, String lineDelim, String comment, String inputFormat, + String outputFormat, String location, String serName, + String storageHandler, + Map serdeProps, + Map tblProps, + boolean ifNotExists, List skewedColNames, List> skewedColValues, + boolean isCTAS, List primaryKeys, List foreignKeys, + List uniqueConstraints, List notNullConstraints, + List defaultConstraints, List checkConstraints) { + this(databaseName, tableName, isExternal, isTemporary, cols, new ArrayList<>(), + bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, + collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, + outputFormat, location, serName, storageHandler, serdeProps, + tblProps, ifNotExists, skewedColNames, skewedColValues, + primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + this.partColNames = partColNames; this.isCTAS = isCTAS; - } - public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary, List cols, List partCols, List bucketCols, List sortCols, int numBuckets, @@ -257,6 +257,14 @@ public void setPartCols(ArrayList partCols) { this.partCols = partCols; } + public List getPartColNames() { + return partColNames; + } + + public void setPartColNames(ArrayList partColNames) { + this.partColNames = partColNames; + } + public List getPrimaryKeys() { return primaryKeys; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java index 6af7833022..c1aeb8f136 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java @@ -19,6 +19,7 @@ import java.io.Serializable; import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.regex.Pattern; @@ -28,7 +29,6 @@ import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.SemanticException; public class DynamicPartitionCtx implements Serializable { @@ -54,7 +54,37 @@ public DynamicPartitionCtx() { } - public DynamicPartitionCtx(Table tbl, Map partSpec, String defaultPartName, + /** + * This constructor is used for partitioned CTAS. Basically we pass the name of + * partitioned columns, which will all be dynamic partitions since the binding + * is done after executing the query in the CTAS. + */ + public DynamicPartitionCtx(List partColNames, String defaultPartName, + int maxParts) throws SemanticException { + this.partSpec = new LinkedHashMap<>(); + this.spNames = new ArrayList<>(); + this.dpNames = new ArrayList<>(); + for (String colName : partColNames) { + this.partSpec.put(colName, null); + this.dpNames.add(colName); + } + this.numBuckets = 0; + this.maxPartsPerNode = maxParts; + this.defaultPartName = defaultPartName; + + this.numDPCols = dpNames.size(); + this.numSPCols = spNames.size(); + this.spPath = null; + String confVal; + try { + confVal = Hive.get().getMetaConf(ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname); + } catch (HiveException e) { + throw new SemanticException(e); + } + this.whiteListPattern = confVal == null || confVal.isEmpty() ? 
null : Pattern.compile(confVal); + } + + public DynamicPartitionCtx(Map partSpec, String defaultPartName, int maxParts) throws SemanticException { this.partSpec = partSpec; this.spNames = new ArrayList(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index af2ece44dc..f32016725a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.io.Serializable; @@ -44,6 +45,7 @@ private boolean isInsertOverwrite; // TODO: the below seem like they should just be combined into partitionDesc + private Table mdTable; private org.apache.hadoop.hive.ql.plan.TableDesc table; private Map partitionSpec; // NOTE: this partitionSpec has to be ordered map @@ -252,4 +254,12 @@ public int getStmtId() { public void setStmtId(int stmtId) { this.stmtId = stmtId; } + + public Table getMdTable() { + return mdTable; + } + + public void setMdTable(Table mdTable) { + this.mdTable = mdTable; + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index 71127c20b7..b369c9633c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -283,7 +283,7 @@ private FileSinkOperator getFileSink(AcidUtils.Operation writeType, partCols.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, PARTCOL_NAME, "a", true)); Map partColMap= new LinkedHashMap(1); partColMap.put(PARTCOL_NAME, null); - DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(null, partColMap, "Sunday", 
100); + DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(partColMap, "Sunday", 100); //todo: does this need the finalDestination? desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null, null, false, false); diff --git a/ql/src/test/queries/clientpositive/partition_ctas.q b/ql/src/test/queries/clientpositive/partition_ctas.q new file mode 100644 index 0000000000..470b86e78a --- /dev/null +++ b/ql/src/test/queries/clientpositive/partition_ctas.q @@ -0,0 +1,51 @@ +--! qt:dataset:src + +EXPLAIN +CREATE TABLE partition_ctas_1 PARTITIONED BY (key) AS +SELECT value, key FROM src where key > 200 and key < 300; + +CREATE TABLE partition_ctas_1 PARTITIONED BY (key) AS +SELECT value, key FROM src where key > 200 and key < 300; + +DESCRIBE FORMATTED partition_ctas_1; + +EXPLAIN +SELECT * FROM partition_ctas_1 where key = 238; + +SELECT * FROM partition_ctas_1 where key = 238; + +CREATE TABLE partition_ctas_2 PARTITIONED BY (value) AS +SELECT key, value FROM src where key > 200 and key < 300; + +EXPLAIN +SELECT * FROM partition_ctas_2 where value = 'val_238'; + +SELECT * FROM partition_ctas_2 where value = 'val_238'; + +EXPLAIN +SELECT value FROM partition_ctas_2 where key = 238; + +SELECT value FROM partition_ctas_2 where key = 238; + +CREATE TABLE partition_ctas_diff_order PARTITIONED BY (value) AS +SELECT value, key FROM src where key > 200 and key < 300; + +EXPLAIN +SELECT * FROM partition_ctas_diff_order where value = 'val_238'; + +SELECT * FROM partition_ctas_diff_order where value = 'val_238'; + +CREATE TABLE partition_ctas_complex_order PARTITIONED BY (c0, c4, c1) AS +SELECT concat(value, '_0') as c0, + concat(value, '_1') as c1, + concat(value, '_2') as c2, + concat(value, '_3') as c3, + concat(value, '_5') as c5, + concat(value, '_4') as c4 +FROM src where key > 200 and key < 240; + +-- c2, c3, c5, c0, c4, c1 +EXPLAIN +SELECT * FROM partition_ctas_complex_order where c0 = 'val_238_0'; + +SELECT * FROM 
partition_ctas_complex_order where c0 = 'val_238_0'; diff --git a/ql/src/test/results/clientpositive/llap/partition_ctas.q.out b/ql/src/test/results/clientpositive/llap/partition_ctas.q.out new file mode 100644 index 0000000000..bd77c8c7dc --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/partition_ctas.q.out @@ -0,0 +1,946 @@ +PREHOOK: query: EXPLAIN +CREATE TABLE partition_ctas_1 PARTITIONED BY (key) AS +SELECT value, key FROM src where key > 200 and key < 300 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN +CREATE TABLE partition_ctas_1 PARTITIONED BY (key) AS +SELECT value, key FROM src where key > 200 and key < 300 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-4 + Stage-3 depends on stages: Stage-0, Stage-4 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + filterExpr: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 300.0D)) (type: boolean) + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: ((UDFToDouble(key) < 300.0D) and (UDFToDouble(key) > 200.0D)) (type: boolean) + Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: value (type: string), key (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partition_ctas_1 + Execution mode: 
vectorized, llap + LLAP IO: no inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-4 + Create Table Operator: + Create Table + columns: value string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + partition columns: key string + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partition_ctas_1 + + Stage: Stage-0 + Move Operator + tables: + partition: + key + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.partition_ctas_1 + + Stage: Stage-3 + Stats Work + Basic Stats Work: + +PREHOOK: query: CREATE TABLE partition_ctas_1 PARTITIONED BY (key) AS +SELECT value, key FROM src where key > 200 and key < 300 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@partition_ctas_1 +PREHOOK: Output: default@partition_ctas_1 +POSTHOOK: query: CREATE TABLE partition_ctas_1 PARTITIONED BY (key) AS +SELECT value, key FROM src where key > 200 and key < 300 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partition_ctas_1 +POSTHOOK: Output: default@partition_ctas_1@key=201 +POSTHOOK: Output: default@partition_ctas_1@key=202 +POSTHOOK: Output: default@partition_ctas_1@key=203 +POSTHOOK: Output: default@partition_ctas_1@key=205 +POSTHOOK: Output: default@partition_ctas_1@key=207 +POSTHOOK: Output: default@partition_ctas_1@key=208 +POSTHOOK: Output: default@partition_ctas_1@key=209 +POSTHOOK: Output: default@partition_ctas_1@key=213 +POSTHOOK: Output: default@partition_ctas_1@key=214 +POSTHOOK: Output: default@partition_ctas_1@key=216 +POSTHOOK: Output: default@partition_ctas_1@key=217 +POSTHOOK: Output: 
default@partition_ctas_1@key=218 +POSTHOOK: Output: default@partition_ctas_1@key=219 +POSTHOOK: Output: default@partition_ctas_1@key=221 +POSTHOOK: Output: default@partition_ctas_1@key=222 +POSTHOOK: Output: default@partition_ctas_1@key=223 +POSTHOOK: Output: default@partition_ctas_1@key=224 +POSTHOOK: Output: default@partition_ctas_1@key=226 +POSTHOOK: Output: default@partition_ctas_1@key=228 +POSTHOOK: Output: default@partition_ctas_1@key=229 +POSTHOOK: Output: default@partition_ctas_1@key=230 +POSTHOOK: Output: default@partition_ctas_1@key=233 +POSTHOOK: Output: default@partition_ctas_1@key=235 +POSTHOOK: Output: default@partition_ctas_1@key=237 +POSTHOOK: Output: default@partition_ctas_1@key=238 +POSTHOOK: Output: default@partition_ctas_1@key=239 +POSTHOOK: Output: default@partition_ctas_1@key=241 +POSTHOOK: Output: default@partition_ctas_1@key=242 +POSTHOOK: Output: default@partition_ctas_1@key=244 +POSTHOOK: Output: default@partition_ctas_1@key=247 +POSTHOOK: Output: default@partition_ctas_1@key=248 +POSTHOOK: Output: default@partition_ctas_1@key=249 +POSTHOOK: Output: default@partition_ctas_1@key=252 +POSTHOOK: Output: default@partition_ctas_1@key=255 +POSTHOOK: Output: default@partition_ctas_1@key=256 +POSTHOOK: Output: default@partition_ctas_1@key=257 +POSTHOOK: Output: default@partition_ctas_1@key=258 +POSTHOOK: Output: default@partition_ctas_1@key=260 +POSTHOOK: Output: default@partition_ctas_1@key=262 +POSTHOOK: Output: default@partition_ctas_1@key=263 +POSTHOOK: Output: default@partition_ctas_1@key=265 +POSTHOOK: Output: default@partition_ctas_1@key=266 +POSTHOOK: Output: default@partition_ctas_1@key=272 +POSTHOOK: Output: default@partition_ctas_1@key=273 +POSTHOOK: Output: default@partition_ctas_1@key=274 +POSTHOOK: Output: default@partition_ctas_1@key=275 +POSTHOOK: Output: default@partition_ctas_1@key=277 +POSTHOOK: Output: default@partition_ctas_1@key=278 +POSTHOOK: Output: default@partition_ctas_1@key=280 +POSTHOOK: Output: 
default@partition_ctas_1@key=281 +POSTHOOK: Output: default@partition_ctas_1@key=282 +POSTHOOK: Output: default@partition_ctas_1@key=283 +POSTHOOK: Output: default@partition_ctas_1@key=284 +POSTHOOK: Output: default@partition_ctas_1@key=285 +POSTHOOK: Output: default@partition_ctas_1@key=286 +POSTHOOK: Output: default@partition_ctas_1@key=287 +POSTHOOK: Output: default@partition_ctas_1@key=288 +POSTHOOK: Output: default@partition_ctas_1@key=289 +POSTHOOK: Output: default@partition_ctas_1@key=291 +POSTHOOK: Output: default@partition_ctas_1@key=292 +POSTHOOK: Output: default@partition_ctas_1@key=296 +POSTHOOK: Output: default@partition_ctas_1@key=298 +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=201).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=202).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=203).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=205).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=207).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=208).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=209).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=213).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=214).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=216).value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=217).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=218).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=219).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=221).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=222).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=223).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=224).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=226).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=228).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=229).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=230).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=233).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=235).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=237).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=238).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=239).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=241).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=242).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=244).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=247).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=248).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=249).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=252).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=255).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=256).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=257).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=258).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=260).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=262).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: 
partition_ctas_1 PARTITION(key=263).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=265).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=266).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=272).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=273).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=274).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=275).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=277).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=278).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=280).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=281).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=282).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=283).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=284).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=285).value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=286).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=287).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=288).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=289).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=291).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=292).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=296).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_1 PARTITION(key=298).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED partition_ctas_1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@partition_ctas_1 +POSTHOOK: query: DESCRIBE FORMATTED partition_ctas_1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@partition_ctas_1 +# col_name data_type comment +value string + +# Partition Information +# col_name data_type comment +key string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 62 + numPartitions 62 + numRows 101 + rawDataSize 707 + totalSize 808 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat 
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: EXPLAIN +SELECT * FROM partition_ctas_1 where key = 238 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT * FROM partition_ctas_1 where key = 238 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: partition_ctas_1 + filterExpr: (238.0D = 238.0D) (type: boolean) + Select Operator + expressions: value (type: string), key (type: string) + outputColumnNames: _col0, _col1 + ListSink + +PREHOOK: query: SELECT * FROM partition_ctas_1 where key = 238 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_ctas_1 +PREHOOK: Input: default@partition_ctas_1@key=238 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM partition_ctas_1 where key = 238 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_ctas_1 +POSTHOOK: Input: default@partition_ctas_1@key=238 +#### A masked pattern was here #### +val_238 238 +val_238 238 +PREHOOK: query: CREATE TABLE partition_ctas_2 PARTITIONED BY (value) AS +SELECT key, value FROM src where key > 200 and key < 300 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@partition_ctas_2 +PREHOOK: Output: default@partition_ctas_2 +POSTHOOK: query: CREATE TABLE partition_ctas_2 PARTITIONED BY (value) AS +SELECT key, value FROM src where key > 200 and key < 300 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partition_ctas_2 +POSTHOOK: Output: default@partition_ctas_2@value=val_201 +POSTHOOK: Output: default@partition_ctas_2@value=val_202 +POSTHOOK: Output: default@partition_ctas_2@value=val_203 +POSTHOOK: Output: 
default@partition_ctas_2@value=val_205 +POSTHOOK: Output: default@partition_ctas_2@value=val_207 +POSTHOOK: Output: default@partition_ctas_2@value=val_208 +POSTHOOK: Output: default@partition_ctas_2@value=val_209 +POSTHOOK: Output: default@partition_ctas_2@value=val_213 +POSTHOOK: Output: default@partition_ctas_2@value=val_214 +POSTHOOK: Output: default@partition_ctas_2@value=val_216 +POSTHOOK: Output: default@partition_ctas_2@value=val_217 +POSTHOOK: Output: default@partition_ctas_2@value=val_218 +POSTHOOK: Output: default@partition_ctas_2@value=val_219 +POSTHOOK: Output: default@partition_ctas_2@value=val_221 +POSTHOOK: Output: default@partition_ctas_2@value=val_222 +POSTHOOK: Output: default@partition_ctas_2@value=val_223 +POSTHOOK: Output: default@partition_ctas_2@value=val_224 +POSTHOOK: Output: default@partition_ctas_2@value=val_226 +POSTHOOK: Output: default@partition_ctas_2@value=val_228 +POSTHOOK: Output: default@partition_ctas_2@value=val_229 +POSTHOOK: Output: default@partition_ctas_2@value=val_230 +POSTHOOK: Output: default@partition_ctas_2@value=val_233 +POSTHOOK: Output: default@partition_ctas_2@value=val_235 +POSTHOOK: Output: default@partition_ctas_2@value=val_237 +POSTHOOK: Output: default@partition_ctas_2@value=val_238 +POSTHOOK: Output: default@partition_ctas_2@value=val_239 +POSTHOOK: Output: default@partition_ctas_2@value=val_241 +POSTHOOK: Output: default@partition_ctas_2@value=val_242 +POSTHOOK: Output: default@partition_ctas_2@value=val_244 +POSTHOOK: Output: default@partition_ctas_2@value=val_247 +POSTHOOK: Output: default@partition_ctas_2@value=val_248 +POSTHOOK: Output: default@partition_ctas_2@value=val_249 +POSTHOOK: Output: default@partition_ctas_2@value=val_252 +POSTHOOK: Output: default@partition_ctas_2@value=val_255 +POSTHOOK: Output: default@partition_ctas_2@value=val_256 +POSTHOOK: Output: default@partition_ctas_2@value=val_257 +POSTHOOK: Output: default@partition_ctas_2@value=val_258 +POSTHOOK: Output: 
default@partition_ctas_2@value=val_260 +POSTHOOK: Output: default@partition_ctas_2@value=val_262 +POSTHOOK: Output: default@partition_ctas_2@value=val_263 +POSTHOOK: Output: default@partition_ctas_2@value=val_265 +POSTHOOK: Output: default@partition_ctas_2@value=val_266 +POSTHOOK: Output: default@partition_ctas_2@value=val_272 +POSTHOOK: Output: default@partition_ctas_2@value=val_273 +POSTHOOK: Output: default@partition_ctas_2@value=val_274 +POSTHOOK: Output: default@partition_ctas_2@value=val_275 +POSTHOOK: Output: default@partition_ctas_2@value=val_277 +POSTHOOK: Output: default@partition_ctas_2@value=val_278 +POSTHOOK: Output: default@partition_ctas_2@value=val_280 +POSTHOOK: Output: default@partition_ctas_2@value=val_281 +POSTHOOK: Output: default@partition_ctas_2@value=val_282 +POSTHOOK: Output: default@partition_ctas_2@value=val_283 +POSTHOOK: Output: default@partition_ctas_2@value=val_284 +POSTHOOK: Output: default@partition_ctas_2@value=val_285 +POSTHOOK: Output: default@partition_ctas_2@value=val_286 +POSTHOOK: Output: default@partition_ctas_2@value=val_287 +POSTHOOK: Output: default@partition_ctas_2@value=val_288 +POSTHOOK: Output: default@partition_ctas_2@value=val_289 +POSTHOOK: Output: default@partition_ctas_2@value=val_291 +POSTHOOK: Output: default@partition_ctas_2@value=val_292 +POSTHOOK: Output: default@partition_ctas_2@value=val_296 +POSTHOOK: Output: default@partition_ctas_2@value=val_298 +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_201).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_202).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_203).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_205).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: 
Lineage: partition_ctas_2 PARTITION(value=val_207).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_208).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_209).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_213).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_214).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_216).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_217).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_218).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_219).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_221).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_222).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_223).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_224).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_226).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_228).key SIMPLE 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_229).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_230).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_233).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_235).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_237).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_238).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_239).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_241).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_242).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_244).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_247).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_248).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_249).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_252).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] 
+POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_255).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_256).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_257).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_258).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_260).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_262).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_263).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_265).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_266).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_272).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_273).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_274).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_275).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_277).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_278).key 
SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_280).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_281).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_282).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_283).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_284).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_285).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_286).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_287).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_288).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_289).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_291).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_292).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_296).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_2 PARTITION(value=val_298).key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] +PREHOOK: query: EXPLAIN +SELECT * FROM partition_ctas_2 where value = 'val_238' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT * FROM partition_ctas_2 where value = 'val_238' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: partition_ctas_2 + filterExpr: (value = 'val_238') (type: boolean) + Select Operator + expressions: key (type: string), 'val_238' (type: string) + outputColumnNames: _col0, _col1 + ListSink + +PREHOOK: query: SELECT * FROM partition_ctas_2 where value = 'val_238' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_ctas_2 +PREHOOK: Input: default@partition_ctas_2@value=val_238 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM partition_ctas_2 where value = 'val_238' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_ctas_2 +POSTHOOK: Input: default@partition_ctas_2@value=val_238 +#### A masked pattern was here #### +238 val_238 +238 val_238 +PREHOOK: query: EXPLAIN +SELECT value FROM partition_ctas_2 where key = 238 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT value FROM partition_ctas_2 where key = 238 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: partition_ctas_2 + filterExpr: (UDFToDouble(key) = 238.0D) (type: boolean) + Filter Operator + predicate: (UDFToDouble(key) = 238.0D) (type: boolean) + Select Operator + expressions: value (type: string) + outputColumnNames: _col0 + ListSink + +PREHOOK: query: SELECT value FROM partition_ctas_2 where key = 238 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_ctas_2 +PREHOOK: Input: default@partition_ctas_2@value=val_201 +PREHOOK: Input: default@partition_ctas_2@value=val_202 +PREHOOK: Input: default@partition_ctas_2@value=val_203 +PREHOOK: Input: 
default@partition_ctas_2@value=val_205 +PREHOOK: Input: default@partition_ctas_2@value=val_207 +PREHOOK: Input: default@partition_ctas_2@value=val_208 +PREHOOK: Input: default@partition_ctas_2@value=val_209 +PREHOOK: Input: default@partition_ctas_2@value=val_213 +PREHOOK: Input: default@partition_ctas_2@value=val_214 +PREHOOK: Input: default@partition_ctas_2@value=val_216 +PREHOOK: Input: default@partition_ctas_2@value=val_217 +PREHOOK: Input: default@partition_ctas_2@value=val_218 +PREHOOK: Input: default@partition_ctas_2@value=val_219 +PREHOOK: Input: default@partition_ctas_2@value=val_221 +PREHOOK: Input: default@partition_ctas_2@value=val_222 +PREHOOK: Input: default@partition_ctas_2@value=val_223 +PREHOOK: Input: default@partition_ctas_2@value=val_224 +PREHOOK: Input: default@partition_ctas_2@value=val_226 +PREHOOK: Input: default@partition_ctas_2@value=val_228 +PREHOOK: Input: default@partition_ctas_2@value=val_229 +PREHOOK: Input: default@partition_ctas_2@value=val_230 +PREHOOK: Input: default@partition_ctas_2@value=val_233 +PREHOOK: Input: default@partition_ctas_2@value=val_235 +PREHOOK: Input: default@partition_ctas_2@value=val_237 +PREHOOK: Input: default@partition_ctas_2@value=val_238 +PREHOOK: Input: default@partition_ctas_2@value=val_239 +PREHOOK: Input: default@partition_ctas_2@value=val_241 +PREHOOK: Input: default@partition_ctas_2@value=val_242 +PREHOOK: Input: default@partition_ctas_2@value=val_244 +PREHOOK: Input: default@partition_ctas_2@value=val_247 +PREHOOK: Input: default@partition_ctas_2@value=val_248 +PREHOOK: Input: default@partition_ctas_2@value=val_249 +PREHOOK: Input: default@partition_ctas_2@value=val_252 +PREHOOK: Input: default@partition_ctas_2@value=val_255 +PREHOOK: Input: default@partition_ctas_2@value=val_256 +PREHOOK: Input: default@partition_ctas_2@value=val_257 +PREHOOK: Input: default@partition_ctas_2@value=val_258 +PREHOOK: Input: default@partition_ctas_2@value=val_260 +PREHOOK: Input: default@partition_ctas_2@value=val_262 
+PREHOOK: Input: default@partition_ctas_2@value=val_263 +PREHOOK: Input: default@partition_ctas_2@value=val_265 +PREHOOK: Input: default@partition_ctas_2@value=val_266 +PREHOOK: Input: default@partition_ctas_2@value=val_272 +PREHOOK: Input: default@partition_ctas_2@value=val_273 +PREHOOK: Input: default@partition_ctas_2@value=val_274 +PREHOOK: Input: default@partition_ctas_2@value=val_275 +PREHOOK: Input: default@partition_ctas_2@value=val_277 +PREHOOK: Input: default@partition_ctas_2@value=val_278 +PREHOOK: Input: default@partition_ctas_2@value=val_280 +PREHOOK: Input: default@partition_ctas_2@value=val_281 +PREHOOK: Input: default@partition_ctas_2@value=val_282 +PREHOOK: Input: default@partition_ctas_2@value=val_283 +PREHOOK: Input: default@partition_ctas_2@value=val_284 +PREHOOK: Input: default@partition_ctas_2@value=val_285 +PREHOOK: Input: default@partition_ctas_2@value=val_286 +PREHOOK: Input: default@partition_ctas_2@value=val_287 +PREHOOK: Input: default@partition_ctas_2@value=val_288 +PREHOOK: Input: default@partition_ctas_2@value=val_289 +PREHOOK: Input: default@partition_ctas_2@value=val_291 +PREHOOK: Input: default@partition_ctas_2@value=val_292 +PREHOOK: Input: default@partition_ctas_2@value=val_296 +PREHOOK: Input: default@partition_ctas_2@value=val_298 +#### A masked pattern was here #### +POSTHOOK: query: SELECT value FROM partition_ctas_2 where key = 238 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_ctas_2 +POSTHOOK: Input: default@partition_ctas_2@value=val_201 +POSTHOOK: Input: default@partition_ctas_2@value=val_202 +POSTHOOK: Input: default@partition_ctas_2@value=val_203 +POSTHOOK: Input: default@partition_ctas_2@value=val_205 +POSTHOOK: Input: default@partition_ctas_2@value=val_207 +POSTHOOK: Input: default@partition_ctas_2@value=val_208 +POSTHOOK: Input: default@partition_ctas_2@value=val_209 +POSTHOOK: Input: default@partition_ctas_2@value=val_213 +POSTHOOK: Input: default@partition_ctas_2@value=val_214 +POSTHOOK: Input: 
default@partition_ctas_2@value=val_216 +POSTHOOK: Input: default@partition_ctas_2@value=val_217 +POSTHOOK: Input: default@partition_ctas_2@value=val_218 +POSTHOOK: Input: default@partition_ctas_2@value=val_219 +POSTHOOK: Input: default@partition_ctas_2@value=val_221 +POSTHOOK: Input: default@partition_ctas_2@value=val_222 +POSTHOOK: Input: default@partition_ctas_2@value=val_223 +POSTHOOK: Input: default@partition_ctas_2@value=val_224 +POSTHOOK: Input: default@partition_ctas_2@value=val_226 +POSTHOOK: Input: default@partition_ctas_2@value=val_228 +POSTHOOK: Input: default@partition_ctas_2@value=val_229 +POSTHOOK: Input: default@partition_ctas_2@value=val_230 +POSTHOOK: Input: default@partition_ctas_2@value=val_233 +POSTHOOK: Input: default@partition_ctas_2@value=val_235 +POSTHOOK: Input: default@partition_ctas_2@value=val_237 +POSTHOOK: Input: default@partition_ctas_2@value=val_238 +POSTHOOK: Input: default@partition_ctas_2@value=val_239 +POSTHOOK: Input: default@partition_ctas_2@value=val_241 +POSTHOOK: Input: default@partition_ctas_2@value=val_242 +POSTHOOK: Input: default@partition_ctas_2@value=val_244 +POSTHOOK: Input: default@partition_ctas_2@value=val_247 +POSTHOOK: Input: default@partition_ctas_2@value=val_248 +POSTHOOK: Input: default@partition_ctas_2@value=val_249 +POSTHOOK: Input: default@partition_ctas_2@value=val_252 +POSTHOOK: Input: default@partition_ctas_2@value=val_255 +POSTHOOK: Input: default@partition_ctas_2@value=val_256 +POSTHOOK: Input: default@partition_ctas_2@value=val_257 +POSTHOOK: Input: default@partition_ctas_2@value=val_258 +POSTHOOK: Input: default@partition_ctas_2@value=val_260 +POSTHOOK: Input: default@partition_ctas_2@value=val_262 +POSTHOOK: Input: default@partition_ctas_2@value=val_263 +POSTHOOK: Input: default@partition_ctas_2@value=val_265 +POSTHOOK: Input: default@partition_ctas_2@value=val_266 +POSTHOOK: Input: default@partition_ctas_2@value=val_272 +POSTHOOK: Input: default@partition_ctas_2@value=val_273 +POSTHOOK: Input: 
default@partition_ctas_2@value=val_274 +POSTHOOK: Input: default@partition_ctas_2@value=val_275 +POSTHOOK: Input: default@partition_ctas_2@value=val_277 +POSTHOOK: Input: default@partition_ctas_2@value=val_278 +POSTHOOK: Input: default@partition_ctas_2@value=val_280 +POSTHOOK: Input: default@partition_ctas_2@value=val_281 +POSTHOOK: Input: default@partition_ctas_2@value=val_282 +POSTHOOK: Input: default@partition_ctas_2@value=val_283 +POSTHOOK: Input: default@partition_ctas_2@value=val_284 +POSTHOOK: Input: default@partition_ctas_2@value=val_285 +POSTHOOK: Input: default@partition_ctas_2@value=val_286 +POSTHOOK: Input: default@partition_ctas_2@value=val_287 +POSTHOOK: Input: default@partition_ctas_2@value=val_288 +POSTHOOK: Input: default@partition_ctas_2@value=val_289 +POSTHOOK: Input: default@partition_ctas_2@value=val_291 +POSTHOOK: Input: default@partition_ctas_2@value=val_292 +POSTHOOK: Input: default@partition_ctas_2@value=val_296 +POSTHOOK: Input: default@partition_ctas_2@value=val_298 +#### A masked pattern was here #### +val_238 +val_238 +PREHOOK: query: CREATE TABLE partition_ctas_diff_order PARTITIONED BY (value) AS +SELECT value, key FROM src where key > 200 and key < 300 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@partition_ctas_diff_order +PREHOOK: Output: default@partition_ctas_diff_order +POSTHOOK: query: CREATE TABLE partition_ctas_diff_order PARTITIONED BY (value) AS +SELECT value, key FROM src where key > 200 and key < 300 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partition_ctas_diff_order +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_201 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_202 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_203 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_205 +POSTHOOK: Output: 
default@partition_ctas_diff_order@value=val_207 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_208 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_209 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_213 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_214 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_216 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_217 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_218 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_219 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_221 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_222 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_223 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_224 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_226 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_228 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_229 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_230 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_233 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_235 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_237 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_238 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_239 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_241 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_242 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_244 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_247 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_248 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_249 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_252 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_255 
+POSTHOOK: Output: default@partition_ctas_diff_order@value=val_256 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_257 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_258 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_260 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_262 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_263 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_265 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_266 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_272 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_273 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_274 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_275 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_277 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_278 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_280 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_281 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_282 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_283 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_284 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_285 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_286 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_287 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_288 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_289 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_291 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_292 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_296 +POSTHOOK: Output: default@partition_ctas_diff_order@value=val_298 +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_201).key SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_202).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_203).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_205).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_207).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_208).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_209).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_213).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_214).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_216).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_217).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_218).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_219).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_221).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order 
PARTITION(value=val_222).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_223).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_224).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_226).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_228).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_229).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_230).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_233).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_235).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_237).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_238).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_239).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_241).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_242).key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_244).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_247).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_248).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_249).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_252).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_255).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_256).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_257).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_258).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_260).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_262).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_263).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_265).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order 
PARTITION(value=val_266).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_272).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_273).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_274).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_275).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_277).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_278).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_280).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_281).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_282).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_283).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_284).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_285).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_286).key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_287).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_288).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_289).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_291).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_292).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_296).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_diff_order PARTITION(value=val_298).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: EXPLAIN +SELECT * FROM partition_ctas_diff_order where value = 'val_238' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT * FROM partition_ctas_diff_order where value = 'val_238' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: partition_ctas_diff_order + filterExpr: (value = 'val_238') (type: boolean) + Select Operator + expressions: key (type: string), 'val_238' (type: string) + outputColumnNames: _col0, _col1 + ListSink + +PREHOOK: query: SELECT * FROM partition_ctas_diff_order where value = 'val_238' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_ctas_diff_order +PREHOOK: Input: default@partition_ctas_diff_order@value=val_238 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM partition_ctas_diff_order where value = 'val_238' +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@partition_ctas_diff_order +POSTHOOK: Input: default@partition_ctas_diff_order@value=val_238 +#### A masked pattern was here #### +238 val_238 +238 val_238 +PREHOOK: query: CREATE TABLE partition_ctas_complex_order PARTITIONED BY (c0, c4, c1) AS +SELECT concat(value, '_0') as c0, + concat(value, '_1') as c1, + concat(value, '_2') as c2, + concat(value, '_3') as c3, + concat(value, '_5') as c5, + concat(value, '_4') as c4 +FROM src where key > 200 and key < 240 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@partition_ctas_complex_order +PREHOOK: Output: default@partition_ctas_complex_order +POSTHOOK: query: CREATE TABLE partition_ctas_complex_order PARTITIONED BY (c0, c4, c1) AS +SELECT concat(value, '_0') as c0, + concat(value, '_1') as c1, + concat(value, '_2') as c2, + concat(value, '_3') as c3, + concat(value, '_5') as c5, + concat(value, '_4') as c4 +FROM src where key > 200 and key < 240 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partition_ctas_complex_order +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_201_0/c4=val_201_4/c1=val_201_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_202_0/c4=val_202_4/c1=val_202_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_203_0/c4=val_203_4/c1=val_203_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_205_0/c4=val_205_4/c1=val_205_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_207_0/c4=val_207_4/c1=val_207_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_208_0/c4=val_208_4/c1=val_208_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_209_0/c4=val_209_4/c1=val_209_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_213_0/c4=val_213_4/c1=val_213_1 +POSTHOOK: Output: 
default@partition_ctas_complex_order@c0=val_214_0/c4=val_214_4/c1=val_214_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_216_0/c4=val_216_4/c1=val_216_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_217_0/c4=val_217_4/c1=val_217_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_218_0/c4=val_218_4/c1=val_218_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_219_0/c4=val_219_4/c1=val_219_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_221_0/c4=val_221_4/c1=val_221_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_222_0/c4=val_222_4/c1=val_222_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_223_0/c4=val_223_4/c1=val_223_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_224_0/c4=val_224_4/c1=val_224_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_226_0/c4=val_226_4/c1=val_226_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_228_0/c4=val_228_4/c1=val_228_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_229_0/c4=val_229_4/c1=val_229_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_230_0/c4=val_230_4/c1=val_230_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_233_0/c4=val_233_4/c1=val_233_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_235_0/c4=val_235_4/c1=val_235_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_237_0/c4=val_237_4/c1=val_237_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_238_0/c4=val_238_4/c1=val_238_1 +POSTHOOK: Output: default@partition_ctas_complex_order@c0=val_239_0/c4=val_239_4/c1=val_239_1 +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_201_0,c4=val_201_4,c1=val_201_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_201_0,c4=val_201_4,c1=val_201_1).c3 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_201_0,c4=val_201_4,c1=val_201_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_202_0,c4=val_202_4,c1=val_202_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_202_0,c4=val_202_4,c1=val_202_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_202_0,c4=val_202_4,c1=val_202_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_203_0,c4=val_203_4,c1=val_203_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_203_0,c4=val_203_4,c1=val_203_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_203_0,c4=val_203_4,c1=val_203_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_205_0,c4=val_205_4,c1=val_205_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_205_0,c4=val_205_4,c1=val_205_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_205_0,c4=val_205_4,c1=val_205_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_207_0,c4=val_207_4,c1=val_207_1).c2 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_207_0,c4=val_207_4,c1=val_207_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_207_0,c4=val_207_4,c1=val_207_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_208_0,c4=val_208_4,c1=val_208_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_208_0,c4=val_208_4,c1=val_208_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_208_0,c4=val_208_4,c1=val_208_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_209_0,c4=val_209_4,c1=val_209_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_209_0,c4=val_209_4,c1=val_209_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_209_0,c4=val_209_4,c1=val_209_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_213_0,c4=val_213_4,c1=val_213_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_213_0,c4=val_213_4,c1=val_213_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_213_0,c4=val_213_4,c1=val_213_1).c5 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_214_0,c4=val_214_4,c1=val_214_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_214_0,c4=val_214_4,c1=val_214_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_214_0,c4=val_214_4,c1=val_214_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_216_0,c4=val_216_4,c1=val_216_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_216_0,c4=val_216_4,c1=val_216_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_216_0,c4=val_216_4,c1=val_216_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_217_0,c4=val_217_4,c1=val_217_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_217_0,c4=val_217_4,c1=val_217_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_217_0,c4=val_217_4,c1=val_217_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_218_0,c4=val_218_4,c1=val_218_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_218_0,c4=val_218_4,c1=val_218_1).c3 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_218_0,c4=val_218_4,c1=val_218_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_219_0,c4=val_219_4,c1=val_219_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_219_0,c4=val_219_4,c1=val_219_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_219_0,c4=val_219_4,c1=val_219_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_221_0,c4=val_221_4,c1=val_221_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_221_0,c4=val_221_4,c1=val_221_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_221_0,c4=val_221_4,c1=val_221_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_222_0,c4=val_222_4,c1=val_222_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_222_0,c4=val_222_4,c1=val_222_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_222_0,c4=val_222_4,c1=val_222_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_223_0,c4=val_223_4,c1=val_223_1).c2 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_223_0,c4=val_223_4,c1=val_223_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_223_0,c4=val_223_4,c1=val_223_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_224_0,c4=val_224_4,c1=val_224_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_224_0,c4=val_224_4,c1=val_224_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_224_0,c4=val_224_4,c1=val_224_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_226_0,c4=val_226_4,c1=val_226_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_226_0,c4=val_226_4,c1=val_226_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_226_0,c4=val_226_4,c1=val_226_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_228_0,c4=val_228_4,c1=val_228_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_228_0,c4=val_228_4,c1=val_228_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_228_0,c4=val_228_4,c1=val_228_1).c5 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_229_0,c4=val_229_4,c1=val_229_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_229_0,c4=val_229_4,c1=val_229_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_229_0,c4=val_229_4,c1=val_229_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_230_0,c4=val_230_4,c1=val_230_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_230_0,c4=val_230_4,c1=val_230_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_230_0,c4=val_230_4,c1=val_230_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_233_0,c4=val_233_4,c1=val_233_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_233_0,c4=val_233_4,c1=val_233_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_233_0,c4=val_233_4,c1=val_233_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_235_0,c4=val_235_4,c1=val_235_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_235_0,c4=val_235_4,c1=val_235_1).c3 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_235_0,c4=val_235_4,c1=val_235_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_237_0,c4=val_237_4,c1=val_237_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_237_0,c4=val_237_4,c1=val_237_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_237_0,c4=val_237_4,c1=val_237_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_238_0,c4=val_238_4,c1=val_238_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_238_0,c4=val_238_4,c1=val_238_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_238_0,c4=val_238_4,c1=val_238_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_239_0,c4=val_239_4,c1=val_239_1).c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_239_0,c4=val_239_4,c1=val_239_1).c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_ctas_complex_order PARTITION(c0=val_239_0,c4=val_239_4,c1=val_239_1).c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: EXPLAIN +SELECT * FROM partition_ctas_complex_order where c0 = 'val_238_0' +PREHOOK: type: QUERY +POSTHOOK: query: 
EXPLAIN +SELECT * FROM partition_ctas_complex_order where c0 = 'val_238_0' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: partition_ctas_complex_order + filterExpr: (c0 = 'val_238_0') (type: boolean) + Select Operator + expressions: c2 (type: string), c3 (type: string), c5 (type: string), 'val_238_0' (type: string), c4 (type: string), c1 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + ListSink + +PREHOOK: query: SELECT * FROM partition_ctas_complex_order where c0 = 'val_238_0' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_ctas_complex_order +PREHOOK: Input: default@partition_ctas_complex_order@c0=val_238_0/c4=val_238_4/c1=val_238_1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM partition_ctas_complex_order where c0 = 'val_238_0' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_ctas_complex_order +POSTHOOK: Input: default@partition_ctas_complex_order@c0=val_238_0/c4=val_238_4/c1=val_238_1 +#### A masked pattern was here #### +val_238_2 val_238_3 val_238_5 val_238_0 val_238_4 val_238_1 +val_238_2 val_238_3 val_238_5 val_238_0 val_238_4 val_238_1