diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index d5374bc..9a8bb4d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -3259,22 +3259,67 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
     // alter the table
     Table tbl = db.getTable(alterTbl.getOldName());
 
-    Partition part = null;
     List<Partition> allPartitions = null;
     if (alterTbl.getPartSpec() != null) {
-      if (alterTbl.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
-        part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
-        if (part == null) {
-          throw new HiveException(ErrorMsg.INVALID_PARTITION,
+      allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec());
+      if (allPartitions.isEmpty()) {
+        throw new HiveException(ErrorMsg.INVALID_PARTITION,
             StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName());
-        }
-      }
-      else {
-        allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec());
+      }
     }
 
     Table oldTbl = tbl.copy();
+    if (allPartitions != null) {
+      // Alter all partitions
+      for (Partition part : allPartitions) {
+        alterTableOrSinglePartition(alterTbl, tbl, part);
+      }
+    } else {
+      // Just alter the table
+      alterTableOrSinglePartition(alterTbl, tbl, null);
+    }
+
+    if (allPartitions == null) {
+      updateModifiedParameters(tbl.getTTable().getParameters(), conf);
+      tbl.checkValidity();
+    } else {
+      for (Partition tmpPart: allPartitions) {
+        updateModifiedParameters(tmpPart.getParameters(), conf);
+      }
+    }
+
+    try {
+      if (allPartitions == null) {
+        db.alterTable(alterTbl.getOldName(), tbl);
+      } else {
+        db.alterPartitions(tbl.getTableName(), allPartitions);
+      }
+    } catch (InvalidOperationException e) {
+      LOG.info("alter table: " + stringifyException(e));
+      throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
+    }
+
+    // This is kind of hacky - the read entity contains the old table, whereas
+    // the write entity contains the new table. This is needed for rename -
+    // both the old and the new table names are passed.
+    // Don't acquire locks for any of these; we have already asked for them in
+    // DDLSemanticAnalyzer.
+    if (allPartitions != null) {
+      for (Partition tmpPart: allPartitions) {
+        work.getInputs().add(new ReadEntity(tmpPart));
+        work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL_NO_LOCK));
+      }
+    } else {
+      work.getInputs().add(new ReadEntity(oldTbl));
+      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
+    }
+    return 0;
+  }
+
+  private int alterTableOrSinglePartition(AlterTableDesc alterTbl, Table tbl, Partition part)
+      throws HiveException {
 
     List<FieldSchema> oldCols = (part == null ? tbl.getCols() : part.getCols());
     StorageDescriptor sd = (part == null ?
         tbl.getTTable().getSd() : part.getTPartition().getSd());
@@ -3420,12 +3465,10 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
       AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType();
 
       ProtectMode mode = null;
-      if (allPartitions != null) {
-        for (Partition tmpPart: allPartitions) {
-          mode = tmpPart.getProtectMode();
-          setAlterProtectMode(protectModeEnable, protectMode, mode);
-          tmpPart.setProtectMode(mode);
-        }
+      if (part != null) {
+        mode = part.getProtectMode();
+        setAlterProtectMode(protectModeEnable, protectMode, mode);
+        part.setProtectMode(mode);
       } else {
         mode = tbl.getProtectMode();
         setAlterProtectMode(protectModeEnable,protectMode, mode);
@@ -3468,12 +3511,12 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
         throw new HiveException(e);
       }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSKEWEDBY) {
-      /* Validation's been done at compile time. no validation is needed here. */
+      // Validation's been done at compile time. No validation is needed here.
       List<String> skewedColNames = null;
      List<List<String>> skewedValues = null;
 
       if (alterTbl.isTurnOffSkewed()) {
-        /* Convert skewed table to non-skewed table. */
+        // Convert skewed table to non-skewed table.
         skewedColNames = new ArrayList<String>();
         skewedValues = new ArrayList<List<String>>();
       } else {
@@ -3482,7 +3525,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
       }
 
       if ( null == tbl.getSkewedInfo()) {
-        /* Convert non-skewed table to skewed table. */
+        // Convert non-skewed table to skewed table.
         SkewedInfo skewedInfo = new SkewedInfo();
         skewedInfo.setSkewedColNames(skewedColNames);
         skewedInfo.setSkewedColValues(skewedValues);
@@ -3524,59 +3567,12 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
       }
       tbl.setNumBuckets(alterTbl.getNumberBuckets());
     }
-  } else {
+    } else {
       throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString());
     }
-    if (part == null && allPartitions == null) {
-      updateModifiedParameters(tbl.getTTable().getParameters(), conf);
-      tbl.checkValidity();
-    } else if (part != null) {
-      updateModifiedParameters(part.getParameters(), conf);
-    }
-    else {
-      for (Partition tmpPart: allPartitions) {
-        updateModifiedParameters(tmpPart.getParameters(), conf);
-      }
-    }
-
-    try {
-      if (part == null && allPartitions == null) {
-        db.alterTable(alterTbl.getOldName(), tbl);
-      } else if (part != null) {
-        db.alterPartition(tbl.getTableName(), part);
-      }
-      else {
-        db.alterPartitions(tbl.getTableName(), allPartitions);
-      }
-    } catch (InvalidOperationException e) {
-      LOG.info("alter table: " + stringifyException(e));
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
-    }
-
-    // This is kind of hacky - the read entity contains the old table, whereas
-    // the write entity
-    // contains the new table. This is needed for rename - both the old and the
-    // new table names are
-    // passed
-    // Don't acquire locks for any of these, we have already asked for them in DDLSemanticAnalyzer.
-    if(part != null) {
-      work.getInputs().add(new ReadEntity(part));
-      work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
-    }
-    else if (allPartitions != null ){
-      for (Partition tmpPart: allPartitions) {
-        work.getInputs().add(new ReadEntity(tmpPart));
-        work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL_NO_LOCK));
-      }
-    }
-    else {
-      work.getInputs().add(new ReadEntity(oldTbl));
-      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
-    }
     return 0;
   }
-
   /**
    * Drop a given table or some partitions. DropTableDesc is currently used for both.
    *
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 4e58ad8..131dfb3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -164,6 +164,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
   private static final Log LOG = LogFactory.getLog(DDLSemanticAnalyzer.class);
   private static final Map<Integer, String> TokenToTypeName = new HashMap<Integer, String>();
+  private static final Set<AlterTableDesc.AlterTableTypes> alterTableTypesWithPartialSpec =
+      new HashSet<AlterTableDesc.AlterTableTypes>();
 
   private final Set<String> reservedPartitionValues;
   private final HiveAuthorizationTaskFactory hiveAuthorizationTaskFactory;
 
@@ -184,6 +186,16 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     TokenToTypeName.put(HiveParser.TOK_DATETIME, serdeConstants.DATETIME_TYPE_NAME);
     TokenToTypeName.put(HiveParser.TOK_TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME);
     TokenToTypeName.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME);
+
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDCOLS);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.REPLACECOLS);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.RENAMECOLUMN);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDPROPS);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.DROPPROPS);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDSERDE);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDSERDEPROPS);
+    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDFILEFORMAT);
   }
 
   public static String getTypeName(ASTNode node) throws SemanticException {
@@ -1399,7 +1411,9 @@ private void addInputsOutputsAlterTable(String tableName, Map<String, String> pa
       // ReadEntity as no lock.
       re.noLockNeeded();
       inputs.add(re);
-      if (desc == null || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
+
+      if (desc == null || !alterTableTypesWithPartialSpec.contains(desc.getOp())) {
+        // Only a fully specified partition spec is allowed for this operation.
         Partition part = getPartition(tab, partSpec, true);
         outputs.add(new WriteEntity(part, writeType));
       }
diff --git ql/src/test/queries/clientpositive/alter_partition_change_col.q ql/src/test/queries/clientpositive/alter_partition_change_col.q
index baabb9f..61e607d 100644
--- ql/src/test/queries/clientpositive/alter_partition_change_col.q
+++ ql/src/test/queries/clientpositive/alter_partition_change_col.q
@@ -57,3 +57,7 @@ alter table alter_partition_change_col1 partition (p1='abc') add columns (c2 dec
 describe alter_partition_change_col1 partition (p1='abc');
 select * from alter_partition_change_col1;
 
+-- Try changing column for all partitions at once
+alter table alter_partition_change_col1 partition (p1) change column c2 c2 decimal(10,0);
+describe alter_partition_change_col1 partition (p1='abc');
+describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__');
diff --git ql/src/test/results/clientpositive/alter_partition_change_col.q.out ql/src/test/results/clientpositive/alter_partition_change_col.q.out
index 7123e40..f68d5c3 100644
--- ql/src/test/results/clientpositive/alter_partition_change_col.q.out
+++ ql/src/test/results/clientpositive/alter_partition_change_col.q.out
@@ -576,3 +576,45 @@ Tom	19.00	__HIVE_DEFAULT_PARTITION__
 Tom	19.00	abc
 Tom	234.79	__HIVE_DEFAULT_PARTITION__
 Tom	234.79	abc
+PREHOOK: query: -- Try changing column for all partitions at once
+alter table alter_partition_change_col1 partition (p1) change column c2 c2 decimal(10,0)
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alter_partition_change_col1
+PREHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: default@alter_partition_change_col1@p1=abc
+POSTHOOK: query: -- Try changing column for all partitions at once
+alter table alter_partition_change_col1 partition (p1) change column c2 c2 decimal(10,0)
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alter_partition_change_col1
+POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@alter_partition_change_col1@p1=abc
+POSTHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: default@alter_partition_change_col1@p1=abc
+PREHOOK: query: describe alter_partition_change_col1 partition (p1='abc')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_partition_change_col1
+POSTHOOK: query: describe alter_partition_change_col1 partition (p1='abc')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_partition_change_col1
+c1	string
+c2	decimal(10,0)
+p1	string
+
+# Partition Information
+# col_name	data_type	comment
+
+p1	string
+PREHOOK: query: describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter_partition_change_col1
+POSTHOOK: query: describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter_partition_change_col1
+c1	string
+c2	decimal(10,0)
+p1	string
+
+# Partition Information
+# col_name	data_type	comment
+
+p1	string
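
Note on the behavior this patch enables: a partial spec such as `partition (p1)` now reaches DDLTask, which expands it via db.getPartitions(tbl, partSpec) and runs alterTableOrSinglePartition() once per matching partition before a single db.alterPartitions() call writes them all back; on the analyzer side, the alterTableTypesWithPartialSpec whitelist decides which ALTER operations may accept such a spec at all. The standalone sketch below models only the expansion step outside of Hive; the map-based partition model and the matchPartitions() helper are illustrative assumptions, not Hive API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hive-free sketch of partial partition spec expansion (illustrative only;
// in the patch the real work is done by Hive.getPartitions()).
public class PartialSpecSketch {

  // A partition matches when its spec agrees with every column that the
  // (possibly partial) filter spec binds.
  static List<Map<String, String>> matchPartitions(
      List<Map<String, String>> partitions, Map<String, String> partialSpec) {
    List<Map<String, String>> matched = new ArrayList<Map<String, String>>();
    for (Map<String, String> part : partitions) {
      if (part.entrySet().containsAll(partialSpec.entrySet())) {
        matched.add(part);
      }
    }
    return matched;
  }

  public static void main(String[] args) {
    // The two partitions from the test above.
    List<Map<String, String>> parts = new ArrayList<Map<String, String>>();
    Map<String, String> abc = new HashMap<String, String>();
    abc.put("p1", "abc");
    parts.add(abc);
    Map<String, String> dflt = new HashMap<String, String>();
    dflt.put("p1", "__HIVE_DEFAULT_PARTITION__");
    parts.add(dflt);

    // "partition (p1)" binds no value, modeled here as an empty filter,
    // so every partition of the table matches.
    Map<String, String> partialSpec = new HashMap<String, String>();
    System.out.println(matchPartitions(parts, partialSpec).size() + " matched"); // prints "2 matched"
  }
}

Because the empty filter matches both partitions, the change column statement is applied to each of them, which is why the .q.out above records PREHOOK/POSTHOOK entities for both p1=abc and p1=__HIVE_DEFAULT_PARTITION__.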