From cdabf285695b0d1e93339e6e3796b4362018a2f3 Mon Sep 17 00:00:00 2001
From: Ashutosh Chauhan
Date: Wed, 21 Oct 2015 13:36:49 -0700
Subject: [PATCH] HIVE-12224 : Remove HOLD_DDLTIME

---
 .../hadoop/hive/ql/history/TestHiveHistory.java | 2 +-
 .../java/org/apache/hadoop/hive/ql/ErrorMsg.java | 4 -
 .../org/apache/hadoop/hive/ql/exec/MoveTask.java | 8 +-
 .../org/apache/hadoop/hive/ql/metadata/Hive.java | 61 +++---
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g | 1 -
 .../org/apache/hadoop/hive/ql/parse/HiveParser.g | 1 -
 .../hadoop/hive/ql/parse/SelectClauseParser.g | 1 -
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 46 -----
 .../apache/hadoop/hive/ql/plan/LoadTableDesc.java | 14 --
 .../apache/hadoop/hive/ql/exec/TestExecDriver.java | 2 +-
 ql/src/test/queries/clientnegative/ddltime.q | 6 -
 ql/src/test/queries/clientpositive/ddltime.q | 45 -----
 ql/src/test/results/clientnegative/ddltime.q.out | 9 -
 ql/src/test/results/clientpositive/ddltime.q.out | 188 ---------------------
 14 files changed, 31 insertions(+), 357 deletions(-)
 delete mode 100644 ql/src/test/queries/clientnegative/ddltime.q
 delete mode 100644 ql/src/test/queries/clientpositive/ddltime.q
 delete mode 100644 ql/src/test/results/clientnegative/ddltime.q.out
 delete mode 100644 ql/src/test/results/clientpositive/ddltime.q.out

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index 76c1636..c046708 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -103,7 +103,7 @@ protected void setUp() {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, false, false, false);
         i++;
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 87c2830..c080570 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -25,8 +25,6 @@
 import java.util.regex.Pattern;
 
 import org.antlr.runtime.tree.Tree;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin;
 
@@ -192,8 +190,6 @@
   UNARCHIVE_ON_MULI_PARTS(10109, "ARCHIVE can only be run on a single partition"),
   ARCHIVE_ON_TABLE(10110, "ARCHIVE can only be run on partitions"),
   RESERVED_PART_VAL(10111, "Partition value contains a reserved substring"),
-  HOLD_DDLTIME_ON_NONEXIST_PARTITIONS(10112, "HOLD_DDLTIME hint cannot be applied to dynamic " +
-      "partitions or non-existent partitions"),
   OFFLINE_TABLE_OR_PARTITION(10113, "Query against an offline table or partition"),
   OUTERJOIN_USES_FILTERS(10114, "The query results could be wrong. "
       + "Turn on hive.outerjoin.supports.filters"),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 7e257e5..920bb1c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -310,7 +310,7 @@ public int execute(DriverContext driverContext) {
         if (tbd.getPartitionSpec().size() == 0) {
           dc = new DataContainer(table.getTTable());
           db.loadTable(tbd.getSourcePath(), tbd.getTable()
-              .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime(), work.isSrcLocal(),
+              .getTableName(), tbd.getReplace(), work.isSrcLocal(),
               isSkewedStoredAsDirs(tbd),
               work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
           if (work.getOutputs() != null) {
@@ -392,7 +392,6 @@ public int execute(DriverContext driverContext) {
               tbd.getPartitionSpec(),
               tbd.getReplace(),
               dpCtx.getNumDPCols(),
-              tbd.getHoldDDLTime(),
               isSkewedStoredAsDirs(tbd),
               work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
               SessionState.get().getTxnMgr().getCurrentTxnId());
@@ -451,11 +450,10 @@ public int execute(DriverContext driverContext) {
               tbd.getPartitionSpec());
           db.validatePartitionNameCharacters(partVals);
           db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
-              tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(),
+              tbd.getPartitionSpec(), tbd.getReplace(),
               tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
               work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
-          Partition partn = db.getPartition(table, tbd.getPartitionSpec(),
-              false);
+          Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
 
           if (bucketCols != null || sortCols != null) {
             updatePartitionBucketSortColumns(table, partn, bucketCols,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 4e3be0d..c64d8d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -112,7 +112,6 @@
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -1378,11 +1377,11 @@ public Database getDatabaseCurrent() throws HiveException {
   }
 
   public void loadPartition(Path loadPath, String tableName,
-      Map<String, String> partSpec, boolean replace, boolean holdDDLTime,
+      Map<String, String> partSpec, boolean replace,
       boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
       boolean isSrcLocal, boolean isAcid) throws HiveException {
     Table tbl = getTable(tableName);
-    loadPartition(loadPath, tbl, partSpec, replace, holdDDLTime, inheritTableSpecs,
+    loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs,
         isSkewedStoreAsSubdir, isSrcLocal, isAcid);
   }
 
@@ -1401,7 +1400,6 @@ public void loadPartition(Path loadPath, String tableName,
    * @param replace
    *          if true - replace files in the partition, otherwise add files to
    *          the partition
-   * @param holdDDLTime if true, force [re]create the partition
    * @param inheritTableSpecs if true, on [re]creating the partition, take the
    *          location/inputformat/outputformat/serde details from table spec
    * @param isSrcLocal
@@ -1409,7 +1407,7 @@ public void loadPartition(Path loadPath, String tableName,
    * @param isAcid true if this is an ACID operation
    */
   public Partition loadPartition(Path loadPath, Table tbl,
-      Map<String, String> partSpec, boolean replace, boolean holdDDLTime,
+      Map<String, String> partSpec, boolean replace,
       boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
       boolean isSrcLocal, boolean isAcid) throws HiveException {
     Path tblDataLocationPath = tbl.getDataLocation();
@@ -1464,26 +1462,24 @@ public Partition loadPartition(Path loadPath, Table tbl,
         Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles);
       }
 
-      boolean forceCreate = (!holdDDLTime) ? true : false;
-      newTPart = getPartition(tbl, partSpec, forceCreate, newPartPath.toString(),
+      newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(),
           inheritTableSpecs, newFiles);
       // recreate the partition if it existed before
-      if (!holdDDLTime) {
-        if (isSkewedStoreAsSubdir) {
-          org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
-          SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
-          /* Construct list bucketing location mappings from sub-directory name. */
-          Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(
-              newPartPath, skewedInfo);
-          /* Add list bucketing location mappings. */
-          skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
-          newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
-          alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newCreatedTpart));
-          newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs,
-              newFiles);
-          return new Partition(tbl, newCreatedTpart);
-        }
+      if (isSkewedStoreAsSubdir) {
+        org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
+        SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
+        /* Construct list bucketing location mappings from sub-directory name. */
+        Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(
+            newPartPath, skewedInfo);
+        /* Add list bucketing location mappings. */
+        skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
+        newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
+        alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newCreatedTpart));
+        newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs,
+            newFiles);
+        return new Partition(tbl, newCreatedTpart);
       }
+
     } catch (IOException e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -1589,7 +1585,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param partSpec
    * @param replace
    * @param numDP number of dynamic partitions
-   * @param holdDDLTime
    * @param listBucketingEnabled
    * @param isAcid true if this is an ACID operation
    * @param txnId txnId, can be 0 unless isAcid == true
@@ -1598,7 +1593,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   public Map<Map<String, String>, Partition> loadDynamicPartitions(Path loadPath,
       String tableName, Map<String, String> partSpec, boolean replace,
-      int numDP, boolean holdDDLTime, boolean listBucketingEnabled, boolean isAcid, long txnId)
+      int numDP, boolean listBucketingEnabled, boolean isAcid, long txnId)
       throws HiveException {
 
     Set<Path> validPartitions = new HashSet<Path>();
@@ -1661,7 +1656,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>(partSpec);
         Warehouse.makeSpecFromName(fullPartSpec, partPath);
         Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace,
-            holdDDLTime, true, listBucketingEnabled, false, isAcid);
+            true, listBucketingEnabled, false, isAcid);
         partitionsMap.put(fullPartSpec, newPartition);
         if (inPlaceEligible) {
           InPlaceUpdates.rePositionCursor(ps);
@@ -1696,7 +1691,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
    *          name of table to be loaded.
    * @param replace
    *          if true - replace files in the table, otherwise add files to table
-   * @param holdDDLTime
    * @param isSrcLocal
    *          If the source directory is LOCAL
    * @param isSkewedStoreAsSubdir
    *
@@ -1704,7 +1698,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param isAcid true if this is an ACID based write
    */
   public void loadTable(Path loadPath, String tableName, boolean replace,
-      boolean holdDDLTime, boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid)
+      boolean isSrcLocal, boolean isSkewedStoreAsSubdir, boolean isAcid)
       throws HiveException {
     List<Path> newFiles = new ArrayList<Path>();
     Table tbl = getTable(tableName);
@@ -1737,13 +1731,12 @@ public void loadTable(Path loadPath, String tableName, boolean replace,
       throw new HiveException(e);
     }
 
-    if (!holdDDLTime) {
-      try {
-        alterTable(tableName, tbl);
-      } catch (InvalidOperationException e) {
-        throw new HiveException(e);
-      }
+    try {
+      alterTable(tableName, tbl);
+    } catch (InvalidOperationException e) {
+      throw new HiveException(e);
     }
+
     fireInsertEvent(tbl, null, newFiles);
   }
 
@@ -2934,8 +2927,6 @@ protected static void replaceFiles(Path tablePath, Path srcf, Path destf, Path o
     try {
       FileSystem destFs = destf.getFileSystem(conf);
-      boolean inheritPerms = HiveConf.getBoolVar(conf,
-          HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
 
       // check if srcf contains nested sub-directories
       FileStatus[] srcs;
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 9f8cfd1..e9fbfb1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -199,7 +199,6 @@ KW_ELSE: 'ELSE';
 KW_END: 'END';
 KW_MAPJOIN: 'MAPJOIN';
 KW_STREAMTABLE: 'STREAMTABLE';
-KW_HOLD_DDLTIME: 'HOLD_DDLTIME';
 KW_CLUSTERSTATUS: 'CLUSTERSTATUS';
 KW_UTC: 'UTC';
 KW_UTCTIMESTAMP: 'UTC_TMESTAMP';
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 5eededd..d8fb83d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -257,7 +257,6 @@ TOK_HINTLIST;
 TOK_HINT;
 TOK_MAPJOIN;
 TOK_STREAMTABLE;
-TOK_HOLD_DDLTIME;
 TOK_HINTARGLIST;
 TOK_USERSCRIPTCOLNAMES;
 TOK_USERSCRIPTCOLSCHEMA;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
index 1dcf392..48bc8b0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
@@ -107,7 +107,6 @@ hintName
     :
     KW_MAPJOIN -> TOK_MAPJOIN
     | KW_STREAMTABLE -> TOK_STREAMTABLE
-    | KW_HOLD_DDLTIME -> TOK_HOLD_DDLTIME
     ;
 
 hintArgs
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 3262887..f47428c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6141,25 +6141,6 @@ private void genPartnCols(String dest, Operator input, QB qb,
       ctx.setPartnCols(partnColsNoConvert);
     }
   }
-  /**
-   * Check for HOLD_DDLTIME hint.
-   *
-   * @param qb
-   * @return true if HOLD_DDLTIME is set, false otherwise.
-   */
-  private boolean checkHoldDDLTime(QB qb) {
-    ASTNode hints = qb.getParseInfo().getHints();
-    if (hints == null) {
-      return false;
-    }
-    for (int pos = 0; pos < hints.getChildCount(); pos++) {
-      ASTNode hint = (ASTNode) hints.getChild(pos);
-      if (((ASTNode) hint.getChild(0)).getToken().getType() == HiveParser.TOK_HOLD_DDLTIME) {
-        return true;
-      }
-    }
-    return false;
-  }
 
   @SuppressWarnings("nls")
   protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
@@ -6181,7 +6162,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
     SortBucketRSCtx rsCtx = new SortBucketRSCtx();
     DynamicPartitionCtx dpCtx = null;
     LoadTableDesc ltd = null;
-    boolean holdDDLTime = checkHoldDDLTime(qb);
     ListBucketingCtx lbCtx = null;
 
     switch (dest_type.intValue()) {
@@ -6228,13 +6208,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
               qb.getParseInfo().getDestForClause(dest),
               ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
         }
-        // the HOLD_DDLTIIME hint should not be used with dynamic partition since the
-        // newly generated partitions should always update their DDLTIME
-        if (holdDDLTime) {
-          throw new SemanticException(generateErrorMessage(
-              qb.getParseInfo().getDestForClause(dest),
-              ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg()));
-        }
         dpCtx = qbm.getDPCtx(dest);
         if (dpCtx == null) {
           dest_tab.validatePartColumnNames(partSpec, false);
@@ -6294,11 +6267,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
       ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
           dest_tab.getTableName()));
       ltd.setLbCtx(lbCtx);
-
-      if (holdDDLTime) {
-        LOG.info("this query will not update transient_lastDdlTime!");
-        ltd.setHoldDDLTime(true);
-      }
       loadTableWork.add(ltd);
     }
 
@@ -6404,20 +6372,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
           dest_tab.getTableName()));
       ltd.setLbCtx(lbCtx);
 
-      if (holdDDLTime) {
-        try {
-          Partition part = db.getPartition(dest_tab, dest_part.getSpec(), false);
-          if (part == null) {
-            throw new SemanticException(generateErrorMessage(
-                qb.getParseInfo().getDestForClause(dest),
-                ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg()));
-          }
-        } catch (HiveException e) {
-          throw new SemanticException(e);
-        }
-        LOG.info("this query will not update transient_lastDdlTime!");
-        ltd.setHoldDDLTime(true);
-      }
       loadTableWork.add(ltd);
       if (!outputs.add(new WriteEntity(dest_part,
           (ltd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE :
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index 3e74d95..427aac1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -36,7 +36,6 @@
   private boolean replace;
   private DynamicPartitionCtx dpCtx;
   private ListBucketingCtx lbCtx;
-  private boolean holdDDLTime;
  private boolean inheritTableSpecs = true; //For partitions, flag controlling whether the current
                                            //table specs are to be used
  // Need to remember whether this is an acid compliant operation, and if so whether it is an
@@ -47,10 +46,6 @@
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
   private Map<String, String> partitionSpec; // NOTE: this partitionSpec has to be ordered map
 
-  public LoadTableDesc() {
-    this.holdDDLTime = false;
-  }
-
   public LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
       final Map<String, String> partitionSpec,
@@ -114,18 +109,9 @@ private void init(
     this.table = table;
     this.partitionSpec = partitionSpec;
     this.replace = replace;
-    this.holdDDLTime = false;
     this.writeType = writeType;
   }
 
-  public void setHoldDDLTime(boolean ddlTime) {
-    holdDDLTime = ddlTime;
-  }
-
-  public boolean getHoldDDLTime() {
-    return holdDDLTime;
-  }
-
   @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public TableDesc getTable() {
     return table;
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index ca59e90..e6d3b29 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -139,7 +139,7 @@
       db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
       db.createTable(src, cols, null, TextInputFormat.class,
           HiveIgnoreKeyTextOutputFormat.class);
-      db.loadTable(hadoopDataFile[i], src, false, false, true, false, false);
+      db.loadTable(hadoopDataFile[i], src, false, true, false, false);
       i++;
     }
diff --git a/ql/src/test/queries/clientnegative/ddltime.q b/ql/src/test/queries/clientnegative/ddltime.q
deleted file mode 100644
index 3517a60..0000000
--- a/ql/src/test/queries/clientnegative/ddltime.q
+++ /dev/null
@@ -1,6 +0,0 @@
-
-create table T2 like srcpart;
-
-insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10;
-
-
diff --git a/ql/src/test/queries/clientpositive/ddltime.q b/ql/src/test/queries/clientpositive/ddltime.q
deleted file mode 100644
index 3eead6f..0000000
--- a/ql/src/test/queries/clientpositive/ddltime.q
+++ /dev/null
@@ -1,45 +0,0 @@
-create table T1 like src;
-
-desc extended T1;
-
-!sleep 1;
-insert overwrite table T1 select * from src;
-
-desc extended T1;
-
-!sleep 1;
-
-insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src;
-
-desc extended T1;
-
-!sleep 1;
-
-insert overwrite table T1 select * from src;
-
-desc extended T1;
-
-
-
-create table if not exists T2 like srcpart;
-desc extended T2;
-
-!sleep 1;
-
-insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10;
-
-desc extended T2 partition (ds = '2010-06-21', hr = '1');
-
-!sleep 1;
-
-insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10;
-
-desc extended T2 partition (ds = '2010-06-21', hr = '1');
-
-!sleep 1;
-
-insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10;
-
-desc extended T2 partition(ds='2010-06-01', hr='1');
-
-
diff --git a/ql/src/test/results/clientnegative/ddltime.q.out b/ql/src/test/results/clientnegative/ddltime.q.out
deleted file mode 100644
index 25d9af6..0000000
--- a/ql/src/test/results/clientnegative/ddltime.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-PREHOOK: query: create table T2 like srcpart
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: create table T2 like srcpart
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: 3:23 HOLD_DDLTIME hint cannot be applied to dynamic partitions or non-existent partitions. Error encountered near token ''1''
diff --git a/ql/src/test/results/clientpositive/ddltime.q.out b/ql/src/test/results/clientpositive/ddltime.q.out
deleted file mode 100644
index ec8938d..0000000
--- a/ql/src/test/results/clientpositive/ddltime.q.out
+++ /dev/null
@@ -1,188 +0,0 @@
-PREHOOK: query: create table T1 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: create table T1 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key string default
-value string default
-
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T1 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table T1 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key string default
-value string default
-
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key string default
-value string default
-
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T1 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table T1 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: desc extended T1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
-key string default
-value string default
-
-#### A masked pattern was here ####
-PREHOOK: query: create table if not exists T2 like srcpart
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: create table if not exists T2 like srcpart
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: desc extended T2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key string default
-value string default
-ds string
-hr string
-
-# Partition Information
-# col_name data_type comment
-
-ds string
-hr string
-
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key string default
-value string default
-ds string
-hr string
-
-# Partition Information
-# col_name data_type comment
-
-ds string
-hr string
-
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=2010-06-21/hr=1
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key string default
-value string default
-ds string
-hr string
-
-# Partition Information
-# col_name data_type comment
-
-ds string
-hr string
-
-#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=2010-06-01/hr=1
-POSTHOOK: query: insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=2010-06-01/hr=1
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc extended T2 partition(ds='2010-06-01', hr='1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: desc extended T2 partition(ds='2010-06-01', hr='1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
-key string default
-value string default
-ds string
-hr string
-
-# Partition Information
-# col_name data_type comment
-
-ds string
-hr string
-
-#### A masked pattern was here ####
-- 
1.7.12.4 (Apple Git-37)
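
Reviewer note (below the mail signature, so it is ignored by git am): the
user-visible effect of this patch is that Hive.loadTable() loses its
holdDDLTime flag, and loadPartition()/loadDynamicPartitions() lose the
matching parameter. A minimal caller-side sketch of the new loadTable
signature, assuming a configured HiveConf and an existing unpartitioned
table; the class name, table name, and data path below are illustrative
placeholders, not part of the patch:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    // Hypothetical caller, for illustration only.
    public class LoadTableAfterHive12224 {
      public static void main(String[] args) throws HiveException {
        Hive db = Hive.get(new HiveConf(LoadTableAfterHive12224.class));
        Path data = new Path("/tmp/kv1.txt"); // placeholder data file

        // Before this patch (seven arguments, the fourth was holdDDLTime):
        //   db.loadTable(data, "src", false, false, false, false, false);

        // After this patch (six arguments):
        // replace=false, isSrcLocal=false, isSkewedStoreAsSubdir=false, isAcid=false
        db.loadTable(data, "src", false, false, false, false);
      }
    }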