Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 982799) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -71,7 +71,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterPartitionProtectModeDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; @@ -185,12 +184,6 @@ return addPartition(db, addPartitionDesc); } - AlterPartitionProtectModeDesc alterPartitionProtectModeDesc = - work.getAlterPartitionProtectModeDesc(); - if (alterPartitionProtectModeDesc != null) { - return alterPartitionProtectMode(db, alterPartitionProtectModeDesc); - } - AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc(); if(simpleDesc != null) { @@ -318,53 +311,6 @@ return 0; } - private int alterPartitionProtectMode(Hive db, - AlterPartitionProtectModeDesc alterPartitionProtectModeDesc) - throws HiveException { - - Table tbl = db.getTable(alterPartitionProtectModeDesc.getDbName(), - alterPartitionProtectModeDesc.getTableName()); - - validateAlterTableType( - tbl, AlterTableDesc.AlterTableTypes.ALTERPARTITIONPROTECTMODE); - - Partition oldPart = db.getPartition( - tbl, alterPartitionProtectModeDesc.getPartSpec(), false); - if (oldPart == null) { - console.printError("Cannot modify protect mode of not existing partition"); - } - - ProtectMode mode = oldPart.getProtectMode(); - - if (alterPartitionProtectModeDesc.isProtectModeEnable() && - alterPartitionProtectModeDesc.getProtectModeType() == - AlterPartitionProtectModeDesc.ProtectModeType.OFFLINE) { - mode.offline = true; - } else if (alterPartitionProtectModeDesc.isProtectModeEnable() && - 
alterPartitionProtectModeDesc.getProtectModeType() == - AlterPartitionProtectModeDesc.ProtectModeType.NO_DROP) { - mode.noDrop = true; - } else if (!alterPartitionProtectModeDesc.isProtectModeEnable()&& - alterPartitionProtectModeDesc.getProtectModeType() == - AlterPartitionProtectModeDesc.ProtectModeType.OFFLINE) { - mode.offline = false; - } else if (!alterPartitionProtectModeDesc.isProtectModeEnable() && - alterPartitionProtectModeDesc.getProtectModeType() == - AlterPartitionProtectModeDesc.ProtectModeType.NO_DROP) { - mode.noDrop = false; - } - - oldPart.setProtectMode(mode); - - try{ - db.alterPartition(alterPartitionProtectModeDesc.getTableName(), oldPart); - } catch(InvalidOperationException e){ - throw new HiveException(e); - } - - return 0; - } - /** * Rewrite the partition's metadata and force the pre/post execute hooks to * be fired. @@ -1638,6 +1584,11 @@ // alter the table Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, alterTbl .getOldName()); + + Partition part = null; + if(alterTbl.getPartSpec() != null) { + part = db.getPartition(tbl, alterTbl.getPartSpec(), false); + } validateAlterTableType(tbl, alterTbl.getOp()); @@ -1779,33 +1730,51 @@ tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl .getDeserializer())); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) { - tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat()); - tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat()); - if (alterTbl.getSerdeName() != null) { - tbl.setSerializationLib(alterTbl.getSerdeName()); + if(part != null) { + part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat()); + part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat()); + if (alterTbl.getSerdeName() != null) { + part.getTPartition().getSd().getSerdeInfo().setSerializationLib( + alterTbl.getSerdeName()); + } + } else { + tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat()); + 
tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat()); + if (alterTbl.getSerdeName() != null) { + tbl.setSerializationLib(alterTbl.getSerdeName()); + } } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) { - ProtectMode mode = tbl.getProtectMode(); + boolean protectModeEnable = alterTbl.isProtectModeEnable(); + AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType(); + + ProtectMode mode = null; + if(part != null) { + mode = part.getProtectMode(); + } else { + mode = tbl.getProtectMode(); + } - if (alterTbl.isProtectModeEnable() && - alterTbl.getProtectModeType() == - AlterTableDesc.ProtectModeType.OFFLINE) { + if (protectModeEnable + && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) { mode.offline = true; - } else if (alterTbl.isProtectModeEnable() && - alterTbl.getProtectModeType() == - AlterTableDesc.ProtectModeType.NO_DROP) { + } else if (protectModeEnable + && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) { mode.noDrop = true; - } else if (!alterTbl.isProtectModeEnable()&& - alterTbl.getProtectModeType() == - AlterTableDesc.ProtectModeType.OFFLINE) { + } else if (!protectModeEnable + && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) { mode.offline = false; - } else if (!alterTbl.isProtectModeEnable() && - alterTbl.getProtectModeType() == - AlterTableDesc.ProtectModeType.NO_DROP) { + } else if (!protectModeEnable + && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) { mode.noDrop = false; } - tbl.setProtectMode(mode); + if (part != null) { + part.setProtectMode(mode); + } else { + tbl.setProtectMode(mode); + } + } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) { // validate sort columns and bucket columns List columns = Utilities.getColumnNamesFromFieldSchema(tbl @@ -1833,32 +1802,63 @@ tbl.getTTable().getSd().setBucketCols(bucketCols); tbl.getTTable().getSd().setNumBuckets(numBuckets); tbl.getTTable().getSd().setSortCols(sortCols); + 
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) { + String newLocation = alterTbl.getNewLocation(); + try { + URI locURI = new URI(newLocation); + if (!locURI.isAbsolute() || locURI.getScheme() == null + || locURI.getScheme().trim().equals("")) { + throw new HiveException( + newLocation + + " is not absolute or has no scheme information. " + + "Please specify a complete absolute uri with scheme information."); + } + if (part != null) { + part.setLocation(newLocation); + } else { + tbl.setDataLocation(locURI); + } + } catch (URISyntaxException e) { + throw new HiveException(e); + } } else { console.printError("Unsupported Alter commnad"); return 1; } // set last modified by properties + String user = null; try { - tbl.setProperty("last_modified_by", conf.getUser()); + user = conf.getUser(); } catch (IOException e) { console.printError("Unable to get current user: " + e.getMessage(), stringifyException(e)); return 1; } - tbl.setProperty("last_modified_time", Long.toString(System - .currentTimeMillis() / 1000)); - try { - tbl.checkValidity(); - } catch (HiveException e) { - console.printError("Invalid table columns : " + e.getMessage(), - stringifyException(e)); - return 1; + if(part == null) { + tbl.setProperty("last_modified_by", user); + tbl.setProperty("last_modified_time", Long.toString(System + .currentTimeMillis() / 1000)); + try { + tbl.checkValidity(); + } catch (HiveException e) { + console.printError("Invalid table columns : " + e.getMessage(), + stringifyException(e)); + return 1; + } + } else { + part.getParameters().put("last_modified_by", user); + part.getParameters().put("last_modified_time", Long.toString(System + .currentTimeMillis() / 1000)); } - + try { - db.alterTable(alterTbl.getOldName(), tbl); + if (part == null) { + db.alterTable(alterTbl.getOldName(), tbl); + } else { + db.alterPartition(tbl.getTableName(), part); + } } catch (InvalidOperationException e) { console.printError("Invalid alter operation: " + 
e.getMessage()); LOG.info("alter table: " + stringifyException(e)); @@ -1872,8 +1872,13 @@ // contains the new table. This is needed for rename - both the old and the // new table names are // passed - work.getInputs().add(new ReadEntity(oldTbl)); - work.getOutputs().add(new WriteEntity(tbl)); + if(part != null) { + work.getInputs().add(new ReadEntity(part)); + work.getOutputs().add(new WriteEntity(part)); + } else { + work.getInputs().add(new ReadEntity(oldTbl)); + work.getOutputs().add(new WriteEntity(tbl)); + } return 0; } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 982799) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -55,7 +55,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterPartitionProtectModeDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; @@ -108,6 +107,24 @@ return TokenToTypeName.get(token); } + static class TablePartition { + String tableName; + HashMap partSpec = null; + + public TablePartition(){ + } + + public TablePartition (ASTNode tblPart) throws SemanticException { + tableName = unescapeIdentifier(tblPart.getChild(0).getText()); + if (tblPart.getChildCount() > 1) { + ASTNode part = (ASTNode) tblPart.getChild(1); + if (part.getToken().getType() == HiveParser.TOK_PARTSPEC) { + this.partSpec = DDLSemanticAnalyzer.getPartSpec(part); + } + } + } + } + public DDLSemanticAnalyzer(HiveConf conf) throws SemanticException { super(conf); // Partition can't have this name @@ -120,7 +137,20 @@ @Override public void analyzeInternal(ASTNode ast) throws 
SemanticException { - if (ast.getToken().getType() == HiveParser.TOK_DROPTABLE) { + + if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PARTITION) { + TablePartition tblPart = new TablePartition((ASTNode)ast.getChild(0)); + String tableName = tblPart.tableName; + HashMap partSpec = tblPart.partSpec; + ast = (ASTNode)ast.getChild(1); + if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { + analyzeAlterTableFileFormat(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE) { + analyzeAlterTableProtectMode(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) { + analyzeAlterTableLocation(ast, tableName, partSpec); + } + } else if (ast.getToken().getType() == HiveParser.TOK_DROPTABLE) { analyzeDropTable(ast, false); } else if (ast.getToken().getType() == HiveParser.TOK_CREATEINDEX) { analyzeCreateIndex(ast); @@ -166,18 +196,12 @@ analyzeAlterTableAddParts(ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) { analyzeAlterTableDropParts(ast); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE) { - analyzeAlterTableAlterPartsProtectMode(ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) { analyzeAlterTableProps(ast, false); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) { analyzeAlterTableSerdeProps(ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER) { analyzeAlterTableSerde(ast); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { - analyzeAlterTableFileFormat(ast); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) { - analyzeAlterTableProtectMode(ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { analyzeAlterTableClusterSort(ast); } else if (ast.getToken().getType() == 
HiveParser.TOK_ALTERINDEX_REBUILD) { @@ -397,14 +421,15 @@ alterTblDesc), conf)); } - private void analyzeAlterTableFileFormat(ASTNode ast) + private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { - String tableName = unescapeIdentifier(ast.getChild(0).getText()); + String inputFormat = null; String outputFormat = null; String storageHandler = null; String serde = null; - ASTNode child = (ASTNode) ast.getChild(1); + ASTNode child = (ASTNode) ast.getChild(0); switch (child.getToken().getType()) { case HiveParser.TOK_TABLEFILEFORMAT: @@ -442,22 +467,34 @@ serde = COLUMNAR_SERDE; break; } + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, inputFormat, - outputFormat, serde, storageHandler); + outputFormat, serde, storageHandler, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + alterTblDesc), conf)); + } + + private void analyzeAlterTableLocation(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { + + String newLocation = unescapeSQLString(ast.getChild(0).getText()); + + AlterTableDesc alterTblDesc = new AlterTableDesc (tableName, newLocation, partSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); } - private void analyzeAlterTableProtectMode(ASTNode ast) + private void analyzeAlterTableProtectMode(ASTNode ast, String tableName, + HashMap partSpec) throws SemanticException { - String tableName = unescapeIdentifier(ast.getChild(0).getText()); AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.ALTERPROTECTMODE); alterTblDesc.setOldName(tableName); + alterTblDesc.setPartSpec(partSpec); - ASTNode child = (ASTNode) ast.getChild(1); + ASTNode child = (ASTNode) ast.getChild(0); switch (child.getToken().getType()) { case HiveParser.TOK_ENABLE: @@ -589,7 +626,7 @@ LOG.info("analyzeDescribeTable done"); } - private HashMap getPartSpec(ASTNode partspec) + private static HashMap 
getPartSpec(ASTNode partspec) throws SemanticException { HashMap partSpec = new LinkedHashMap(); for (int i = 0; i < partspec.getChildCount(); ++i) { @@ -832,60 +869,6 @@ } /** - * Alter protect mode of a table or partition - * - * @param ast - * The parsed command tree. - * @throws SemanticException - * Parsin failed - */ - private void analyzeAlterTableAlterPartsProtectMode(CommonTree ast) - throws SemanticException { - - String tblName = unescapeIdentifier(ast.getChild(0).getText()); - - List> partSpecs = getPartitionSpecs(ast); - Map partSpec = partSpecs.get(0); - AlterPartitionProtectModeDesc desc = new AlterPartitionProtectModeDesc( - MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partSpec); - - CommonTree child = (CommonTree) ast.getChild(2); - - switch (child.getToken().getType()) { - case HiveParser.TOK_ENABLE: - desc.setProtectModeEnable(true); - break; - case HiveParser.TOK_DISABLE: - desc.setProtectModeEnable(false); - break; - default: - throw new SemanticException( - "Set Protect mode Syntax parsing error."); - } - - ASTNode grandChild = (ASTNode) child.getChild(0); - switch (grandChild.getToken().getType()) { - case HiveParser.TOK_OFFLINE: - desc.setProtectModeType( - AlterPartitionProtectModeDesc.ProtectModeType.OFFLINE); - break; - case HiveParser.TOK_NO_DROP: - desc.setProtectModeType( - AlterPartitionProtectModeDesc.ProtectModeType.NO_DROP); - break; - case HiveParser.TOK_READONLY: - throw new SemanticException( - "Potect mode READONLY is not implemented"); - default: - throw new SemanticException( - "Only protect mode NO_DROP or OFFLINE supported"); - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc - ), conf)); - } - - /** * Rewrite the metadata for one or more partitions in a table. Useful when * an external process modifies files on HDFS and you want the pre/post * hooks to be fired for the specified partition. 
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 982799) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy) @@ -93,6 +93,7 @@ TOK_LIKETABLE; TOK_DESCTABLE; TOK_DESCFUNCTION; +TOK_ALTERTABLE_PARTITION; TOK_ALTERTABLE_RENAME; TOK_ALTERTABLE_ADDCOLS; TOK_ALTERTABLE_RENAMECOL; @@ -105,9 +106,10 @@ TOK_ALTERTABLE_UNARCHIVE; TOK_ALTERTABLE_SERDEPROPERTIES; TOK_ALTERTABLE_SERIALIZER; +TOK_TABLE_PARTITION; TOK_ALTERTABLE_FILEFORMAT; +TOK_ALTERTABLE_LOCATION; TOK_ALTERTABLE_PROPERTIES; -TOK_ALTERTABLE_PROTECTMODE; TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION; TOK_ALTERINDEX_REBUILD; TOK_MSCK; @@ -353,14 +355,12 @@ | alterStatementSuffixRenameCol | alterStatementSuffixDropPartitions | alterStatementSuffixAddPartitions - | alterStatementSuffixAlterPartitionsProtectMode | alterStatementSuffixTouch | alterStatementSuffixArchive | alterStatementSuffixUnArchive | alterStatementSuffixProperties | alterStatementSuffixSerdeProperties - | alterStatementSuffixFileFormat - | alterStatementSuffixProtectMode + | alterTblPartitionStatement | alterStatementSuffixClusterbySortby ; @@ -405,13 +405,6 @@ -> ^(TOK_ALTERTABLE_ADDPARTS Identifier ifNotExists? (partitionSpec partitionLocation?)+) ; -alterStatementSuffixAlterPartitionsProtectMode -@init { msgs.push("alter partition protect mode statement"); } -@after { msgs.pop(); } - : Identifier partitionSpec alterProtectMode - -> ^(TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE Identifier partitionSpec alterProtectMode) - ; - alterStatementSuffixTouch @init { msgs.push("touch statement"); } @after { msgs.pop(); } @@ -470,20 +463,49 @@ -> ^(TOK_ALTERTABLE_SERDEPROPERTIES $name tableProperties) ; +tablePartitionPrefix +@init {msgs.push("table partition prefix");} +@after {msgs.pop();} + :name=Identifier partitionSpec? + ->^(TOK_TABLE_PARTITION $name partitionSpec?) 
+ ; + +alterTblPartitionStatement +@init {msgs.push("alter table partition statement");} +@after {msgs.pop();} + : tablePartitionPrefix alterTblPartitionStatementSuffix + -> ^(TOK_ALTERTABLE_PARTITION tablePartitionPrefix alterTblPartitionStatementSuffix) + ; + +alterTblPartitionStatementSuffix +@init {msgs.push("alter table partition statement suffix");} +@after {msgs.pop();} + : alterStatementSuffixFileFormat + | alterStatementSuffixLocation + | alterStatementSuffixProtectMode + ; + alterStatementSuffixFileFormat @init {msgs.push("alter fileformat statement"); } -@after {msgs.pop(); } - :name=Identifier KW_SET KW_FILEFORMAT fileFormat - -> ^(TOK_ALTERTABLE_FILEFORMAT $name fileFormat) +@after {msgs.pop();} + : KW_SET KW_FILEFORMAT fileFormat + -> ^(TOK_ALTERTABLE_FILEFORMAT fileFormat) ; +alterStatementSuffixLocation +@init {msgs.push("alter location");} +@after {msgs.pop();} + : KW_SET KW_LOCATION newLoc=StringLiteral + -> ^(TOK_ALTERTABLE_LOCATION $newLoc) + ; + alterStatementSuffixProtectMode -@init {msgs.push("alter protectmode statement"); } -@after {msgs.pop(); } - :name=Identifier alterProtectMode - -> ^(TOK_ALTERTABLE_PROTECTMODE $name alterProtectMode) - ; - +@init { msgs.push("alter partition protect mode statement"); } +@after { msgs.pop(); } + : alterProtectMode + -> ^(TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE alterProtectMode) + ; + alterProtectMode @init { msgs.push("protect mode specification enable"); } @after { msgs.pop(); } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (revision 982799) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (working copy) @@ -30,6 +30,7 @@ public final class SemanticAnalyzerFactory { static HashMap commandType = new HashMap(); + static HashMap tablePartitionCommandType = new HashMap(); static { 
commandType.put(HiveParser.TOK_EXPLAIN, "EXPLAIN"); @@ -65,16 +66,23 @@ commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, "ALTERVIEW_PROPERTIES"); commandType.put(HiveParser.TOK_QUERY, "QUERY"); } + + static { + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE, + new String[] { "ALTERTABLE_PROTECTMODE", "ALTERPARTITION_PROTECTMODE" }); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT, + new String[] { "ALTERTABLE_FILEFORMAT", "ALTERPARTITION_FILEFORMAT" }); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_LOCATION, + new String[] { "ALTERTABLE_LOCATION", "ALTERPARTITION_LOCATION" }); + } + public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) throws SemanticException { if (tree.getToken() == null) { throw new RuntimeException("Empty Syntax Tree"); } else { - if (SessionState.get() != null) { - SessionState.get().setCommandType( - commandType.get(tree.getToken().getType())); - } + setSessionCommandType(commandType.get(tree.getToken().getType())); switch (tree.getToken().getType()) { case HiveParser.TOK_EXPLAIN: @@ -92,7 +100,6 @@ case HiveParser.TOK_ALTERTABLE_RENAME: case HiveParser.TOK_ALTERTABLE_DROPPARTS: case HiveParser.TOK_ALTERTABLE_ADDPARTS: - case HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE: case HiveParser.TOK_ALTERTABLE_PROPERTIES: case HiveParser.TOK_ALTERTABLE_SERIALIZER: case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: @@ -104,13 +111,21 @@ case HiveParser.TOK_SHOWPARTITIONS: case HiveParser.TOK_CREATEINDEX: case HiveParser.TOK_DROPINDEX: - case HiveParser.TOK_ALTERTABLE_FILEFORMAT: - case HiveParser.TOK_ALTERTABLE_PROTECTMODE: case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: case HiveParser.TOK_ALTERTABLE_TOUCH: case HiveParser.TOK_ALTERTABLE_ARCHIVE: case HiveParser.TOK_ALTERTABLE_UNARCHIVE: return new DDLSemanticAnalyzer(conf); + case HiveParser.TOK_ALTERTABLE_PARTITION: + String commandType = null; + Integer type = ((ASTNode) tree.getChild(1)).getToken().getType(); + if 
(tree.getChild(0).getChildCount() > 1) { + commandType = tablePartitionCommandType.get(type)[1]; + } else { + commandType = tablePartitionCommandType.get(type)[0]; + } + setSessionCommandType(commandType); + return new DDLSemanticAnalyzer(conf); case HiveParser.TOK_CREATEFUNCTION: case HiveParser.TOK_DROPFUNCTION: return new FunctionSemanticAnalyzer(conf); @@ -120,6 +135,12 @@ } } + private static void setSessionCommandType(String commandType) { + if (SessionState.get() != null) { + SessionState.get().setCommandType(commandType); + } + } + private SemanticAnalyzerFactory() { // prevent instantiation } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AlterPartitionProtectModeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterPartitionProtectModeDesc.java (revision 982799) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterPartitionProtectModeDesc.java (working copy) @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.LinkedHashMap; -import java.util.Map; - -/** - * Contains the information needed to add a partition. 
- */ -public class AlterPartitionProtectModeDesc extends DDLDesc implements Serializable { - - private static final long serialVersionUID = 1L; - - String tableName; - String dbName; - boolean protectModeEnable; - ProtectModeType protectModeType; - - public static enum ProtectModeType { - NO_DROP, OFFLINE, READ_ONLY - }; - - LinkedHashMap partSpec; - - /** - * For serialization only. - */ - public AlterPartitionProtectModeDesc() { - } - - /** - * @param dbName - * database to add to. - * @param tableName - * table to add to. - * @param partSpec - * partition specification. - * @param location - * partition location, relative to table location. - * @param ifNotExists - * if true, the partition is only added if it doesn't exist - */ - public AlterPartitionProtectModeDesc(String dbName, String tableName, - Map partSpec) { - super(); - this.dbName = dbName; - this.tableName = tableName; - this.partSpec = new LinkedHashMap(partSpec); - } - - /** - * @return database name - */ - public String getDbName() { - return dbName; - } - - /** - * @param dbName - * database name - */ - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** - * @return the table we're going to add the partitions to. - */ - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the table we're going to add the partitions to. - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return partition specification. 
- */ - public LinkedHashMap getPartSpec() { - return partSpec; - } - - /** - * @param partSpec - * partition specification - */ - public void setPartSpec(LinkedHashMap partSpec) { - this.partSpec = partSpec; - } - - public boolean isProtectModeEnable() { - return protectModeEnable; - } - - public void setProtectModeEnable(boolean protectModeEnable) { - this.protectModeEnable = protectModeEnable; - } - - public ProtectModeType getProtectModeType() { - return protectModeType; - } - - public void setProtectModeType(ProtectModeType protectModeType) { - this.protectModeType = protectModeType; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (revision 982799) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (working copy) @@ -42,7 +42,7 @@ public static enum AlterTableTypes { RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, - TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, + TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, ALTERLOCATION, }; public static enum ProtectModeType { @@ -70,6 +70,8 @@ boolean first; String afterCol; boolean expectView; + HashMap partSpec; + private String newLocation; boolean protectModeEnable; ProtectModeType protectModeType; @@ -149,9 +151,10 @@ * new table input format * @param outputFormat * new table output format + * @param partSpec */ public AlterTableDesc(String name, String inputFormat, String outputFormat, - String serdeName, String storageHandler) { + String serdeName, String storageHandler, HashMap partSpec) { super(); op = AlterTableTypes.ADDFILEFORMAT; oldName = name; @@ -159,6 +162,7 @@ this.outputFormat = outputFormat; this.serdeName = serdeName; this.storageHandler = storageHandler; + this.partSpec = partSpec; } public 
AlterTableDesc(String tableName, int numBuckets, @@ -170,6 +174,14 @@ sortColumns = new ArrayList(sortCols); } + public AlterTableDesc(String tableName, String newLocation, + HashMap partSpec) { + op = AlterTableTypes.ALTERLOCATION; + this.oldName = tableName; + this.newLocation = newLocation; + this.partSpec = partSpec; + } + @Explain(displayName = "new columns") public List getNewColsString() { return Utilities.getFieldSchemaString(getNewCols()); @@ -481,6 +493,34 @@ this.expectView = expectView; } + /** + * @return part specification + */ + public HashMap getPartSpec() { + return partSpec; + } + + /** + * @param partSpec + */ + public void setPartSpec(HashMap partSpec) { + this.partSpec = partSpec; + } + + /** + * @return new location + */ + public String getNewLocation() { + return newLocation; + } + + /** + * @param newLocation new location + */ + public void setNewLocation(String newLocation) { + this.newLocation = newLocation; + } + public boolean isProtectModeEnable() { return protectModeEnable; } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (revision 982799) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (working copy) @@ -43,7 +43,6 @@ private ShowPartitionsDesc showPartsDesc; private DescTableDesc descTblDesc; private AddPartitionDesc addPartitionDesc; - private AlterPartitionProtectModeDesc alterPartitionProtectModeDesc; private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; private ShowTableStatusDesc showTblStatusDesc; @@ -185,17 +184,6 @@ } /** - * @param addPartitionDesc - * information about the partitions we want to add. 
- */ - public DDLWork(HashSet inputs, HashSet outputs, - AlterPartitionProtectModeDesc alterPartitionProtectModeDesc) { - this(inputs, outputs); - - this.alterPartitionProtectModeDesc = alterPartitionProtectModeDesc; - } - - /** * @param touchDesc * information about the table/partitions that we want to touch */ @@ -494,13 +482,4 @@ this.dropIdxDesc = dropIdxDesc; } - public AlterPartitionProtectModeDesc getAlterPartitionProtectModeDesc() { - return alterPartitionProtectModeDesc; - } - - public void setAlterPartitionProtectModeDesc( - AlterPartitionProtectModeDesc alterPartitionProtectModeDesc) { - this.alterPartitionProtectModeDesc = alterPartitionProtectModeDesc; - } - } Index: ql/src/test/queries/clientpositive/alter_partition_format_loc.q =================================================================== --- ql/src/test/queries/clientpositive/alter_partition_format_loc.q (revision 0) +++ ql/src/test/queries/clientpositive/alter_partition_format_loc.q (revision 0) @@ -0,0 +1,32 @@ +create table alter_partition_format_test (key int, value string); +desc extended alter_partition_format_test; + +alter table alter_partition_format_test set fileformat rcfile; +desc extended alter_partition_format_test; + +alter table alter_partition_format_test set location "file:/test/test/"; +desc extended alter_partition_format_test; + +drop table alter_partition_format_test; + +--partitioned table +create table alter_partition_format_test (key int, value string) partitioned by (ds string); + +alter table alter_partition_format_test add partition(ds='2010'); +desc extended alter_partition_format_test partition(ds='2010'); + +alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile; +desc extended alter_partition_format_test partition(ds='2010'); + +alter table alter_partition_format_test partition(ds='2010') set location "file:/test/test/ds=2010"; +desc extended alter_partition_format_test partition(ds='2010'); + +desc extended 
alter_partition_format_test; + +alter table alter_partition_format_test set fileformat rcfile; +desc extended alter_partition_format_test; + +alter table alter_partition_format_test set location "file:/test/test/"; +desc extended alter_partition_format_test; + +drop table alter_partition_format_test; \ No newline at end of file Index: ql/src/test/results/clientpositive/alter_partition_format_loc.q.out =================================================================== --- ql/src/test/results/clientpositive/alter_partition_format_loc.q.out (revision 0) +++ ql/src/test/results/clientpositive/alter_partition_format_loc.q.out (revision 0) @@ -0,0 +1,141 @@ +PREHOOK: query: create table alter_partition_format_test (key int, value string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table alter_partition_format_test (key int, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_partition_format_test +PREHOOK: query: desc extended alter_partition_format_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test +POSTHOOK: type: DESCTABLE +key int +value string + +Detailed Table Information Table(tableName:alter_partition_format_test, dbName:default, owner:heyongqiang, createTime:1281148707, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter_partition_format_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1281148707}, viewOriginalText:null, viewExpandedText:null, 
tableType:MANAGED_TABLE) +PREHOOK: query: alter table alter_partition_format_test set fileformat rcfile +PREHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: query: alter table alter_partition_format_test set fileformat rcfile +POSTHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Output: default@alter_partition_format_test +PREHOOK: query: desc extended alter_partition_format_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test +POSTHOOK: type: DESCTABLE +key int from deserializer +value string from deserializer + +Detailed Table Information Table(tableName:alter_partition_format_test, dbName:default, owner:heyongqiang, createTime:1281148707, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter_partition_format_test, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148707, transient_lastDdlTime=1281148707}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: alter table alter_partition_format_test set location "file:/test/test/" +PREHOOK: type: ALTERTABLE_LOCATION +POSTHOOK: query: alter table alter_partition_format_test set location "file:/test/test/" +POSTHOOK: type: ALTERTABLE_LOCATION +POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Output: default@alter_partition_format_test +PREHOOK: query: desc extended alter_partition_format_test +PREHOOK: type: DESCTABLE 
+POSTHOOK: query: desc extended alter_partition_format_test +POSTHOOK: type: DESCTABLE +key int from deserializer +value string from deserializer + +Detailed Table Information Table(tableName:alter_partition_format_test, dbName:default, owner:heyongqiang, createTime:1281148707, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:file:/test/test/, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148707, transient_lastDdlTime=1281148707}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: drop table alter_partition_format_test +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table alter_partition_format_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@alter_partition_format_test +PREHOOK: query: --partitioned table +create table alter_partition_format_test (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: --partitioned table +create table alter_partition_format_test (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@alter_partition_format_test +PREHOOK: query: alter table alter_partition_format_test add partition(ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: query: alter table alter_partition_format_test add partition(ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@alter_partition_format_test@ds=2010 +PREHOOK: query: desc extended alter_partition_format_test partition(ds='2010') +PREHOOK: type: 
DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test partition(ds='2010') +POSTHOOK: type: DESCTABLE +key int +value string +ds string + +Detailed Partition Information Partition(values:[2010], dbName:default, tableName:alter_partition_format_test, createTime:1281148708, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter_partition_format_test/ds=2010, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1281148708}) +PREHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile +PREHOOK: type: ALTERPARTITION_FILEFORMAT +POSTHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile +POSTHOOK: type: ALTERPARTITION_FILEFORMAT +POSTHOOK: Input: default@alter_partition_format_test@ds=2010 +POSTHOOK: Output: default@alter_partition_format_test@ds=2010 +PREHOOK: query: desc extended alter_partition_format_test partition(ds='2010') +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test partition(ds='2010') +POSTHOOK: type: DESCTABLE +key int +value string +ds string + +Detailed Partition Information Partition(values:[2010], dbName:default, tableName:alter_partition_format_test, createTime:1281148708, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)],
location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter_partition_format_test/ds=2010, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{last_modified_by=heyongqiang, last_modified_time=1281148708, transient_lastDdlTime=1281148708}) +PREHOOK: query: alter table alter_partition_format_test partition(ds='2010') set location "file:/test/test/ds=2010" +PREHOOK: type: ALTERPARTITION_LOCATION +POSTHOOK: query: alter table alter_partition_format_test partition(ds='2010') set location "file:/test/test/ds=2010" +POSTHOOK: type: ALTERPARTITION_LOCATION +POSTHOOK: Input: default@alter_partition_format_test@ds=2010 +POSTHOOK: Output: default@alter_partition_format_test@ds=2010 +PREHOOK: query: desc extended alter_partition_format_test partition(ds='2010') +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test partition(ds='2010') +POSTHOOK: type: DESCTABLE +key int +value string +ds string + +Detailed Partition Information Partition(values:[2010], dbName:default, tableName:alter_partition_format_test, createTime:1281148708, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:file:/test/test/ds=2010, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{last_modified_by=heyongqiang, last_modified_time=1281148708, 
transient_lastDdlTime=1281148708}) +PREHOOK: query: desc extended alter_partition_format_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test +POSTHOOK: type: DESCTABLE +key int +value string +ds string + +Detailed Table Information Table(tableName:alter_partition_format_test, dbName:default, owner:heyongqiang, createTime:1281148708, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter_partition_format_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=1281148708}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: alter table alter_partition_format_test set fileformat rcfile +PREHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: query: alter table alter_partition_format_test set fileformat rcfile +POSTHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Output: default@alter_partition_format_test +PREHOOK: query: desc extended alter_partition_format_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test +POSTHOOK: type: DESCTABLE +key int from deserializer +value string from deserializer +ds string + +Detailed Table Information Table(tableName:alter_partition_format_test, dbName:default, owner:heyongqiang, createTime:1281148708, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, 
comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter_partition_format_test, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148709, transient_lastDdlTime=1281148709}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: alter table alter_partition_format_test set location "file:/test/test/" +PREHOOK: type: ALTERTABLE_LOCATION +POSTHOOK: query: alter table alter_partition_format_test set location "file:/test/test/" +POSTHOOK: type: ALTERTABLE_LOCATION +POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Output: default@alter_partition_format_test +PREHOOK: query: desc extended alter_partition_format_test +PREHOOK: type: DESCTABLE +POSTHOOK: query: desc extended alter_partition_format_test +POSTHOOK: type: DESCTABLE +key int from deserializer +value string from deserializer +ds string + +Detailed Table Information Table(tableName:alter_partition_format_test, dbName:default, owner:heyongqiang, createTime:1281148708, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:file:/test/test/, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), 
bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148709, transient_lastDdlTime=1281148709}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +PREHOOK: query: drop table alter_partition_format_test +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table alter_partition_format_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@alter_partition_format_test Index: ql/src/test/results/clientpositive/diff_part_input_formats.q.out =================================================================== --- ql/src/test/results/clientpositive/diff_part_input_formats.q.out (revision 982799) +++ ql/src/test/results/clientpositive/diff_part_input_formats.q.out (working copy) @@ -15,9 +15,9 @@ POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@part_test@ds=1 PREHOOK: query: ALTER TABLE part_test SET FILEFORMAT RCFILE -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: ALTER TABLE part_test SET FILEFORMAT RCFILE -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@part_test POSTHOOK: Output: default@part_test PREHOOK: query: ALTER TABLE part_test ADD PARTITION(ds='2') @@ -27,8 +27,8 @@ POSTHOOK: Output: default@part_test@ds=2 PREHOOK: query: SELECT count(1) FROM part_test WHERE ds='3' PREHOOK: type: QUERY -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-21-20_502_7998672376160685808/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-57-45_916_425825309143884801/-mr-10000 POSTHOOK: query: SELECT count(1) FROM part_test WHERE ds='3' POSTHOOK: type: QUERY -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-21-20_502_7998672376160685808/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-57-45_916_425825309143884801/-mr-10000 0 Index: 
ql/src/test/results/clientpositive/fileformat_mix.q.out =================================================================== --- ql/src/test/results/clientpositive/fileformat_mix.q.out (revision 982799) +++ ql/src/test/results/clientpositive/fileformat_mix.q.out (working copy) @@ -4,9 +4,9 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: default@fileformat_mix_test PREHOOK: query: alter table fileformat_mix_test set fileformat Sequencefile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table fileformat_mix_test set fileformat Sequencefile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@fileformat_mix_test POSTHOOK: Output: default@fileformat_mix_test PREHOOK: query: insert overwrite table fileformat_mix_test partition (ds='1') @@ -29,9 +29,9 @@ POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: alter table fileformat_mix_test set fileformat rcfile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table fileformat_mix_test set fileformat rcfile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@fileformat_mix_test POSTHOOK: Output: default@fileformat_mix_test POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -40,12 +40,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@fileformat_mix_test@ds=1 PREHOOK: Input: default@fileformat_mix_test@ds=2 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-21-45_393_3654336984212621494/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-57-57_858_225783878907101923/-mr-10000 POSTHOOK: query: select count(1) from 
fileformat_mix_test POSTHOOK: type: QUERY POSTHOOK: Input: default@fileformat_mix_test@ds=1 POSTHOOK: Input: default@fileformat_mix_test@ds=2 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-21-45_393_3654336984212621494/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-57-57_858_225783878907101923/-mr-10000 POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 500 @@ -53,12 +53,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@fileformat_mix_test@ds=1 PREHOOK: Input: default@fileformat_mix_test@ds=2 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-21-48_001_5704820361095123784/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-03_160_2998957707101006120/-mr-10000 POSTHOOK: query: select src from fileformat_mix_test POSTHOOK: type: QUERY POSTHOOK: Input: default@fileformat_mix_test@ds=1 POSTHOOK: Input: default@fileformat_mix_test@ds=2 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-21-48_001_5704820361095123784/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-03_160_2998957707101006120/-mr-10000 POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 238 Index: ql/src/test/results/clientpositive/partition_wise_fileformat.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat.q.out (revision 982799) +++ 
ql/src/test/results/clientpositive/partition_wise_fileformat.q.out (working copy) @@ -20,8 +20,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat columns:struct columns { string key, string value} @@ -32,7 +32,7 @@ maxFileSize:216 minFileSize:216 lastAccessTime:0 -lastUpdateTime:1279737923000 +lastUpdateTime:1281167892000 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100) PREHOOK: type: SHOW_TABLESTATUS @@ -41,8 +41,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=100 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=100 inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat columns:struct columns { string key, string value} @@ -53,16 +53,16 @@ maxFileSize:216 minFileSize:216 lastAccessTime:0 
-lastUpdateTime:1279737923000 +lastUpdateTime:1281167892000 PREHOOK: query: select key from partition_test_partitioned where dt=100 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=100 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-23_982_7698604976741917428/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-12_540_2239548703782463764/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt=100 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-23_982_7698604976741917428/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-12_540_2239548703782463764/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] 238 @@ -93,11 +93,11 @@ PREHOOK: query: select key from partition_test_partitioned PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=100 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-26_455_7900841763716004103/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-16_365_1091922344087311950/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-26_455_7900841763716004103/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-16_365_1091922344087311950/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE 
[(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] 238 @@ -126,9 +126,9 @@ PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@partition_test_partitioned POSTHOOK: Output: default@partition_test_partitioned POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -154,8 +154,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat columns:struct columns { string key, string value} @@ -166,7 +166,7 @@ maxFileSize:370 minFileSize:216 lastAccessTime:0 -lastUpdateTime:1279737931000 +lastUpdateTime:1281167904000 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100) PREHOOK: type: SHOW_TABLESTATUS @@ -177,8 +177,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: 
partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=100 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=100 inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat columns:struct columns { string key, string value} @@ -189,7 +189,7 @@ maxFileSize:216 minFileSize:216 lastAccessTime:0 -lastUpdateTime:1279737931000 +lastUpdateTime:1281167904000 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101) PREHOOK: type: SHOW_TABLESTATUS @@ -200,8 +200,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat columns:struct columns { string key, string value} @@ -212,16 +212,16 @@ maxFileSize:370 minFileSize:370 lastAccessTime:0 -lastUpdateTime:1279737931000 +lastUpdateTime:1281167904000 PREHOOK: query: select key from partition_test_partitioned where dt=100 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=100 -PREHOOK: Output: 
file:/tmp/jssarma/hive_2010-07-21_11-45-31_748_8367411748103278779/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-25_092_6856625830670881694/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt=100 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-31_748_8367411748103278779/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-25_092_6856625830670881694/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -254,11 +254,11 @@ PREHOOK: query: select key from partition_test_partitioned where dt=101 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=101 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-34_225_2802204195313608860/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-29_018_4189228390799385358/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt=101 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=101 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-34_225_2802204195313608860/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-29_018_4189228390799385358/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: 
partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -292,12 +292,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=100 PREHOOK: Input: default@partition_test_partitioned@dt=101 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-36_701_4909943953812237318/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-32_962_5959761298152062/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 POSTHOOK: Input: default@partition_test_partitioned@dt=101 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-36_701_4909943953812237318/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-32_962_5959761298152062/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -353,9 +353,9 @@ PREHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@partition_test_partitioned POSTHOOK: Output: default@partition_test_partitioned POSTHOOK: Lineage: partition_test_partitioned 
PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -387,8 +387,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat columns:struct columns { string key, string value} @@ -399,7 +399,7 @@ maxFileSize:888 minFileSize:216 lastAccessTime:0 -lastUpdateTime:1279737941000 +lastUpdateTime:1281167921000 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100) PREHOOK: type: SHOW_TABLESTATUS @@ -412,8 +412,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=100 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=100 inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat columns:struct columns { string key, string value} @@ -424,7 +424,7 @@ maxFileSize:216 
minFileSize:216 lastAccessTime:0 -lastUpdateTime:1279737941000 +lastUpdateTime:1281167921000 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101) PREHOOK: type: SHOW_TABLESTATUS @@ -437,8 +437,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat columns:struct columns { string key, string value} @@ -449,7 +449,7 @@ maxFileSize:370 minFileSize:370 lastAccessTime:0 -lastUpdateTime:1279737941000 +lastUpdateTime:1281167921000 PREHOOK: query: show table extended like partition_test_partitioned partition(dt=102) PREHOOK: type: SHOW_TABLESTATUS @@ -462,8 +462,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=102 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=102 inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat 
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat columns:struct columns { string key, string value} @@ -474,16 +474,16 @@ maxFileSize:888 minFileSize:888 lastAccessTime:0 -lastUpdateTime:1279737941000 +lastUpdateTime:1281167921000 PREHOOK: query: select key from partition_test_partitioned where dt=100 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=100 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-42_155_7749641338163247267/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-42_663_3677129000804574570/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt=100 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-42_155_7749641338163247267/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-42_663_3677129000804574570/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -518,11 +518,11 @@ PREHOOK: query: select key from partition_test_partitioned where dt=101 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=101 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-44_640_267727809536101133/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-46_288_5985616954285711901/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt=101 POSTHOOK: type: QUERY POSTHOOK: Input: 
default@partition_test_partitioned@dt=101 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-44_640_267727809536101133/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-46_288_5985616954285711901/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -557,11 +557,11 @@ PREHOOK: query: select key from partition_test_partitioned where dt=102 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=102 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-47_122_5836450210515977525/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-50_080_1123984711773684734/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt=102 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=102 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-47_122_5836450210515977525/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-50_080_1123984711773684734/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -598,13 +598,13 @@ PREHOOK: Input: 
default@partition_test_partitioned@dt=100 PREHOOK: Input: default@partition_test_partitioned@dt=101 PREHOOK: Input: default@partition_test_partitioned@dt=102 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-49_604_111610574124667/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-54_055_5906737999229998851/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 POSTHOOK: Input: default@partition_test_partitioned@dt=101 POSTHOOK: Input: default@partition_test_partitioned@dt=102 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-49_604_111610574124667/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-54_055_5906737999229998851/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -691,13 +691,13 @@ PREHOOK: Input: default@partition_test_partitioned@dt=100 PREHOOK: Input: default@partition_test_partitioned@dt=101 PREHOOK: Input: default@partition_test_partitioned@dt=102 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-52_135_418677121289533314/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-59_207_1542640067535172047/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt >=100 and dt <= 102 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 POSTHOOK: Input: default@partition_test_partitioned@dt=101 POSTHOOK: Input: 
default@partition_test_partitioned@dt=102 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-45-52_135_418677121289533314/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-58-59_207_1542640067535172047/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out (revision 982799) +++ ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out (working copy) @@ -14,9 +14,9 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@partition_test_partitioned POSTHOOK: Output: default@partition_test_partitioned POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -34,9 +34,9 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, 
comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@partition_test_partitioned POSTHOOK: Output: default@partition_test_partitioned POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -62,13 +62,13 @@ PREHOOK: Input: default@partition_test_partitioned@dt=100 PREHOOK: Input: default@partition_test_partitioned@dt=101 PREHOOK: Input: default@partition_test_partitioned@dt=102 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-46-02_669_7182591152833006574/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-59-19_214_8947184382877860008/-mr-10000 POSTHOOK: query: select * from partition_test_partitioned where dt >=100 and dt <= 102 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=100 POSTHOOK: Input: default@partition_test_partitioned@dt=101 POSTHOOK: Input: default@partition_test_partitioned@dt=102 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-46-02_669_7182591152833006574/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-59-19_214_8947184382877860008/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned 
PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out =================================================================== --- ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out (revision 982799) +++ ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out (working copy) @@ -4,9 +4,9 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: default@partition_test_partitioned PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@partition_test_partitioned POSTHOOK: Output: default@partition_test_partitioned PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1 @@ -26,8 +26,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat columns:struct columns { string key, string value} @@ -38,12 +38,12 @@ maxFileSize:370 minFileSize:370 lastAccessTime:0 -lastUpdateTime:1279737965000 +lastUpdateTime:1281167964000 PREHOOK: query: alter table partition_test_partitioned set fileformat 
Sequencefile -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@partition_test_partitioned POSTHOOK: Output: default@partition_test_partitioned POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -69,8 +69,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=102 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=102 inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat columns:struct columns { string key, string value} @@ -81,16 +81,16 @@ maxFileSize:888 minFileSize:888 lastAccessTime:0 -lastUpdateTime:1279737968000 +lastUpdateTime:1281167969000 PREHOOK: query: select key from partition_test_partitioned where dt=102 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=102 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-46-08_379_9044568484329597052/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-59-29_952_4034031846702868123/-mr-10000 POSTHOOK: query: select key from partition_test_partitioned where dt=102 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=102 -POSTHOOK: Output: 
file:/tmp/jssarma/hive_2010-07-21_11-46-08_379_9044568484329597052/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-59-29_952_4034031846702868123/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] @@ -145,8 +145,8 @@ POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] tableName:partition_test_partitioned -owner:jssarma -location:file:/mnt/vol/devrs004.snc1/jssarma/projects/hive_trunk/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 +owner:heyongqiang +location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/partition_test_partitioned/dt=101 inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat columns:struct columns { string key, string value} @@ -157,16 +157,16 @@ maxFileSize:888 minFileSize:888 lastAccessTime:0 -lastUpdateTime:1279737973000 +lastUpdateTime:1281167979000 PREHOOK: query: select key from partition_test_partitioned where dt=101 PREHOOK: type: QUERY PREHOOK: Input: default@partition_test_partitioned@dt=101 -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-46-13_527_1591714627720505462/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-59-39_288_1071363340396075185/-mr-10000 POSTHOOK: query: 
select key from partition_test_partitioned where dt=101 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_test_partitioned@dt=101 -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-46-13_527_1591714627720505462/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-07_00-59-39_288_1071363340396075185/-mr-10000 POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/protectmode.q.out =================================================================== --- ql/src/test/results/clientpositive/protectmode.q.out (revision 982799) +++ ql/src/test/results/clientpositive/protectmode.q.out (working copy) @@ -18,23 +18,23 @@ PREHOOK: query: select * from tbl1 PREHOOK: type: QUERY PREHOOK: Input: default@tbl1 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-16_084_5868788449948187519/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-03_139_5747715366863380530/-mr-10000 POSTHOOK: query: select * from tbl1 POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-16_084_5868788449948187519/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-03_139_5747715366863380530/-mr-10000 PREHOOK: query: select col from tbl1 PREHOOK: type: QUERY PREHOOK: Input: default@tbl1 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-16_303_5881584661921655666/-mr-10000 +PREHOOK: Output: 
file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-03_397_5871188371505681904/-mr-10000 POSTHOOK: query: select col from tbl1 POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-16_303_5881584661921655666/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-03_397_5871188371505681904/-mr-10000 PREHOOK: query: alter table tbl1 enable offline -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: query: alter table tbl1 enable offline -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: Input: default@tbl1 POSTHOOK: Output: default@tbl1 PREHOOK: query: desc extended tbl1 @@ -43,11 +43,11 @@ POSTHOOK: type: DESCTABLE col string -Detailed Table Information Table(tableName:tbl1, dbName:default, owner:sdong, createTime:1280962816, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=sdong, last_modified_time=1280962819, PROTECT_MODE=OFFLINE, transient_lastDdlTime=1280962819}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:tbl1, dbName:default, owner:heyongqiang, createTime:1281148863, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl1, 
inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148867, PROTECT_MODE=OFFLINE, transient_lastDdlTime=1281148867}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table tbl1 disable offline -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: query: alter table tbl1 disable offline -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: Input: default@tbl1 POSTHOOK: Output: default@tbl1 PREHOOK: query: desc extended tbl1 @@ -56,23 +56,23 @@ POSTHOOK: type: DESCTABLE col string -Detailed Table Information Table(tableName:tbl1, dbName:default, owner:sdong, createTime:1280962816, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=sdong, last_modified_time=1280962819, transient_lastDdlTime=1280962819}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:tbl1, dbName:default, owner:heyongqiang, createTime:1281148863, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], 
location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148867, transient_lastDdlTime=1281148867}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: select * from tbl1 PREHOOK: type: QUERY PREHOOK: Input: default@tbl1 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-19_785_3387037181104157417/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-07_730_1993453490011072324/-mr-10000 POSTHOOK: query: select * from tbl1 POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-19_785_3387037181104157417/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-07_730_1993453490011072324/-mr-10000 PREHOOK: query: select col from tbl1 PREHOOK: type: QUERY PREHOOK: Input: default@tbl1 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-19_991_7208412460463641915/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-07_832_7085833949870836891/-mr-10000 POSTHOOK: query: select col from tbl1 POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-19_991_7208412460463641915/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-07_832_7085833949870836891/-mr-10000 PREHOOK: query: create table tbl2 (col string) partitioned by 
(p string) PREHOOK: type: CREATETABLE POSTHOOK: query: create table tbl2 (col string) partitioned by (p string) @@ -95,23 +95,25 @@ PREHOOK: query: select * from tbl2 where p='p1' PREHOOK: type: QUERY PREHOOK: Input: default@tbl2@p=p1 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-23_396_4211977439004283750/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-11_715_1211468753134615746/-mr-10000 POSTHOOK: query: select * from tbl2 where p='p1' POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl2@p=p1 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-23_396_4211977439004283750/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-11_715_1211468753134615746/-mr-10000 PREHOOK: query: select * from tbl2 where p='p2' PREHOOK: type: QUERY PREHOOK: Input: default@tbl2@p=p2 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-23_617_4676355123590038305/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-11_871_7266217973957398626/-mr-10000 POSTHOOK: query: select * from tbl2 where p='p2' POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl2@p=p2 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-23_617_4676355123590038305/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-11_871_7266217973957398626/-mr-10000 PREHOOK: query: alter table tbl2 partition (p='p1') enable offline -PREHOOK: type: null +PREHOOK: type: ALTERPARTITION_PROTECTMODE POSTHOOK: query: alter table tbl2 partition (p='p1') enable offline -POSTHOOK: type: null +POSTHOOK: type: ALTERPARTITION_PROTECTMODE +POSTHOOK: Input: default@tbl2@p=p1 +POSTHOOK: Output: default@tbl2@p=p1 PREHOOK: query: desc extended tbl2 partition (p='p1') PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended tbl2 partition (p='p1') @@ -119,11 +121,11 @@ col 
string p string -Detailed Partition Information Partition(values:[p1], dbName:default, tableName:tbl2, createTime:1280962823, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl2/p=p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{PROTECT_MODE=OFFLINE, transient_lastDdlTime=1280962823}) +Detailed Partition Information Partition(values:[p1], dbName:default, tableName:tbl2, createTime:1281148871, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl2/p=p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{last_modified_by=heyongqiang, last_modified_time=1281148872, PROTECT_MODE=OFFLINE, transient_lastDdlTime=1281148872}) PREHOOK: query: alter table tbl2 enable offline -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: query: alter table tbl2 enable offline -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: Input: default@tbl2 POSTHOOK: Output: default@tbl2 PREHOOK: query: desc extended tbl2 @@ -133,11 +135,11 @@ col string p string -Detailed Table Information Table(tableName:tbl2, dbName:default, owner:sdong, createTime:1280962823, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=sdong, last_modified_time=1280962824, PROTECT_MODE=OFFLINE, transient_lastDdlTime=1280962824}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:tbl2, dbName:default, owner:heyongqiang, createTime:1281148871, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148872, PROTECT_MODE=OFFLINE, transient_lastDdlTime=1281148872}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table tbl2 enable no_drop -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: query: alter table tbl2 enable no_drop -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: Input: default@tbl2 POSTHOOK: Output: default@tbl2 PREHOOK: query: desc extended 
tbl2 @@ -147,11 +149,11 @@ col string p string -Detailed Table Information Table(tableName:tbl2, dbName:default, owner:sdong, createTime:1280962823, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=sdong, last_modified_time=1280962824, PROTECT_MODE=OFFLINE,NO_DROP, transient_lastDdlTime=1280962824}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:tbl2, dbName:default, owner:heyongqiang, createTime:1281148871, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148872, PROTECT_MODE=OFFLINE,NO_DROP, transient_lastDdlTime=1281148872}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table tbl2 disable offline -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_PROTECTMODE 
POSTHOOK: query: alter table tbl2 disable offline -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: Input: default@tbl2 POSTHOOK: Output: default@tbl2 PREHOOK: query: desc extended tbl2 @@ -161,11 +163,11 @@ col string p string -Detailed Table Information Table(tableName:tbl2, dbName:default, owner:sdong, createTime:1280962823, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=sdong, last_modified_time=1280962824, PROTECT_MODE=NO_DROP, transient_lastDdlTime=1280962824}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:tbl2, dbName:default, owner:heyongqiang, createTime:1281148871, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148872, PROTECT_MODE=NO_DROP, 
transient_lastDdlTime=1281148872}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: alter table tbl2 disable no_drop -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: query: alter table tbl2 disable no_drop -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_PROTECTMODE POSTHOOK: Input: default@tbl2 POSTHOOK: Output: default@tbl2 PREHOOK: query: desc extended tbl2 @@ -175,27 +177,29 @@ col string p string -Detailed Table Information Table(tableName:tbl2, dbName:default, owner:sdong, createTime:1280962823, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=sdong, last_modified_time=1280962824, transient_lastDdlTime=1280962824}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:tbl2, dbName:default, owner:heyongqiang, createTime:1281148871, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), 
partitionKeys:[FieldSchema(name:p, type:string, comment:null)], parameters:{last_modified_by=heyongqiang, last_modified_time=1281148872, transient_lastDdlTime=1281148872}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: select * from tbl2 where p='p2' PREHOOK: type: QUERY PREHOOK: Input: default@tbl2@p=p2 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-24_808_6663194749606163260/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-12_903_6190899374761689415/-mr-10000 POSTHOOK: query: select * from tbl2 where p='p2' POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl2@p=p2 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-24_808_6663194749606163260/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-12_903_6190899374761689415/-mr-10000 PREHOOK: query: select col from tbl2 where p='p2' PREHOOK: type: QUERY PREHOOK: Input: default@tbl2@p=p2 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-24_975_1396047242551851951/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-13_040_6865186751626031899/-mr-10000 POSTHOOK: query: select col from tbl2 where p='p2' POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl2@p=p2 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-24_975_1396047242551851951/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-13_040_6865186751626031899/-mr-10000 PREHOOK: query: alter table tbl2 partition (p='p1') disable offline -PREHOOK: type: null +PREHOOK: type: ALTERPARTITION_PROTECTMODE POSTHOOK: query: alter table tbl2 partition (p='p1') disable offline -POSTHOOK: type: null +POSTHOOK: type: ALTERPARTITION_PROTECTMODE +POSTHOOK: Input: default@tbl2@p=p1 +POSTHOOK: Output: default@tbl2@p=p1 PREHOOK: query: desc extended tbl2 
partition (p='p1') PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended tbl2 partition (p='p1') @@ -203,23 +207,23 @@ col string p string -Detailed Partition Information Partition(values:[p1], dbName:default, tableName:tbl2, createTime:1280962823, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/data/users/sdong/hive-vendor-trunk-git/build/ql/test/data/warehouse/tbl2/p=p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1280962828}) +Detailed Partition Information Partition(values:[p1], dbName:default, tableName:tbl2, createTime:1281148871, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/tbl2/p=p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{last_modified_by=heyongqiang, last_modified_time=1281148876, transient_lastDdlTime=1281148876}) PREHOOK: query: select * from tbl2 where p='p1' PREHOOK: type: QUERY PREHOOK: Input: default@tbl2@p=p1 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-28_142_3955301982627210591/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-16_878_4933729810371048462/-mr-10000 POSTHOOK: query: select * from tbl2 where p='p1' POSTHOOK: type: QUERY 
POSTHOOK: Input: default@tbl2@p=p1 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-28_142_3955301982627210591/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-16_878_4933729810371048462/-mr-10000 PREHOOK: query: select col from tbl2 where p='p1' PREHOOK: type: QUERY PREHOOK: Input: default@tbl2@p=p1 -PREHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-28_295_5354583470260616394/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-16_994_1879569288303678420/-mr-10000 POSTHOOK: query: select col from tbl2 where p='p1' POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl2@p=p1 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-08-04_16-00-28_295_5354583470260616394/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-08-06_19-41-16_994_1879569288303678420/-mr-10000 PREHOOK: query: insert overwrite table tbl1 select col from tbl2 where p='p1' PREHOOK: type: QUERY PREHOOK: Input: default@tbl2@p=p1 @@ -240,21 +244,27 @@ POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl2)tbl2.FieldSchema(name:p, type:string, comment:null), ] POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl1)tbl1.FieldSchema(name:col, type:string, comment:null), ] PREHOOK: query: alter table tbl2 partition (p='p1') enable no_drop -PREHOOK: type: null +PREHOOK: type: ALTERPARTITION_PROTECTMODE POSTHOOK: query: alter table tbl2 partition (p='p1') enable no_drop -POSTHOOK: type: null +POSTHOOK: type: ALTERPARTITION_PROTECTMODE +POSTHOOK: Input: default@tbl2@p=p1 +POSTHOOK: Output: default@tbl2@p=p1 POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl2)tbl2.FieldSchema(name:p, type:string, comment:null), ] POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl1)tbl1.FieldSchema(name:col, type:string, comment:null), ] PREHOOK: query: alter table tbl2 partition (p='p1') disable no_drop -PREHOOK: type: null +PREHOOK: type: ALTERPARTITION_PROTECTMODE POSTHOOK: query: 
alter table tbl2 partition (p='p1') disable no_drop -POSTHOOK: type: null +POSTHOOK: type: ALTERPARTITION_PROTECTMODE +POSTHOOK: Input: default@tbl2@p=p1 +POSTHOOK: Output: default@tbl2@p=p1 POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl2)tbl2.FieldSchema(name:p, type:string, comment:null), ] POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl1)tbl1.FieldSchema(name:col, type:string, comment:null), ] PREHOOK: query: alter table tbl2 partition (p='p2') enable no_drop -PREHOOK: type: null +PREHOOK: type: ALTERPARTITION_PROTECTMODE POSTHOOK: query: alter table tbl2 partition (p='p2') enable no_drop -POSTHOOK: type: null +POSTHOOK: type: ALTERPARTITION_PROTECTMODE +POSTHOOK: Input: default@tbl2@p=p2 +POSTHOOK: Output: default@tbl2@p=p2 POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl2)tbl2.FieldSchema(name:p, type:string, comment:null), ] POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl1)tbl1.FieldSchema(name:col, type:string, comment:null), ] PREHOOK: query: alter table tbl2 drop partition (p='p1') @@ -265,9 +275,11 @@ POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl2)tbl2.FieldSchema(name:p, type:string, comment:null), ] POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl1)tbl1.FieldSchema(name:col, type:string, comment:null), ] PREHOOK: query: alter table tbl2 partition (p='p2') disable no_drop -PREHOOK: type: null +PREHOOK: type: ALTERPARTITION_PROTECTMODE POSTHOOK: query: alter table tbl2 partition (p='p2') disable no_drop -POSTHOOK: type: null +POSTHOOK: type: ALTERPARTITION_PROTECTMODE +POSTHOOK: Input: default@tbl2@p=p2 +POSTHOOK: Output: default@tbl2@p=p2 POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl2)tbl2.FieldSchema(name:p, type:string, comment:null), ] POSTHOOK: Lineage: tbl1.col SIMPLE [(tbl1)tbl1.FieldSchema(name:col, type:string, comment:null), ] PREHOOK: query: drop table tbl1