diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java index 3c6d7eada9..4e76536a81 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java @@ -20,6 +20,10 @@ import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -61,4 +65,9 @@ public static boolean allowOperationInReplicationScope(Hive db, String tableName // Or the existing table is newer than our update. So, don't allow the update. return false; } + + public static boolean isSchemaEvolutionEnabled(Table table, Configuration conf) { + return AcidUtils.isTablePropertyTransactional(table.getMetadata()) || + HiveConf.getBoolVar(conf, ConfVars.HIVE_SCHEMA_EVOLUTION); + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithConstraintsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithConstraintsDesc.java new file mode 100644 index 0000000000..2aa631987c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithConstraintsDesc.java @@ -0,0 +1,25 @@ +package org.apache.hadoop.hive.ql.ddl.table; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; + +public abstract class AlterTableWithConstraintsDesc extends AlterTableWithWriteIdDesc { + private static final long serialVersionUID = 1L; + + private final Constraints constraints; + + public AlterTableWithConstraintsDesc(AlterTableTypes type, String tableName, Map partitionSpec, + ReplicationSpec replicationSpec, boolean isCascade, boolean expectView, Constraints constraints) + throws SemanticException { + super(type, tableName, partitionSpec, replicationSpec, isCascade, expectView); + this.constraints = constraints; + } + + public Constraints getConstraints() { + return constraints; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithWriteIdDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithWriteIdDesc.java new file mode 100644 index 0000000000..a7598b59d3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithWriteIdDesc.java @@ -0,0 +1,82 @@ +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +public abstract class AlterTableWithWriteIdDesc implements DDLDesc, DDLDescWithWriteId, Serializable { + private static final long serialVersionUID = 1L; + + private final AlterTableTypes type; + private final 
String tableName; + private final Map partitionSpec; + private final ReplicationSpec replicationSpec; + private final boolean isCascade; + private final boolean expectView; + + private Map props; + + private Long writeId; + + public AlterTableWithWriteIdDesc(AlterTableTypes type, String tableName, Map partitionSpec, + ReplicationSpec replicationSpec, boolean isCascade, boolean expectView) throws SemanticException { + this.type = type; + this.tableName = String.join(".", Utilities.getDbTableName(tableName)); + this.partitionSpec = partitionSpec; + this.replicationSpec = replicationSpec; + this.isCascade = isCascade; + this.expectView = expectView; + } + + public AlterTableTypes getType() { + return type; + } + + @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map getPartitionSpec() { + return partitionSpec; + } + + public ReplicationSpec getReplicationSpec() { + return replicationSpec; + } + + public boolean isCascade() { + return isCascade; + } + + public boolean expectView() { + return expectView; + } + + public Map getProps() { + return props; + } + + @Override + public String getFullTableName() { + return tableName; + } + + @Override + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + public Long getWriteId() { + return writeId; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithWriteIdOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithWriteIdOperation.java new file mode 100644 index 0000000000..9bdf42c39c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableWithWriteIdOperation.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableAddConstraintOperation; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Operation process of running some alter table command that requires write id. + */ +public abstract class AlterTableWithWriteIdOperation extends DDLOperation { + private final AlterTableWithWriteIdDesc desc; + + public AlterTableWithWriteIdOperation(DDLOperationContext context, AlterTableWithWriteIdDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + if (!AlterTableUtils.allowOperationInReplicationScope(context.getDb(), desc.getTableName(), null, + desc.getReplicationSpec())) { + // no alter, the table is missing either due to drop/rename which follows the alter. + // or the existing table is newer than our update. + LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getTableName()); + return 0; + } + + Table oldTable = context.getDb().getTable(desc.getTableName()); + List partitions = getPartitions(oldTable, desc.getPartitionSpec(), context); + + // Don't change the table object returned by the metastore, as we'll mess with it's caches. + Table table = oldTable.copy(); + + EnvironmentContext environmentContext = initializeEnvironmentContext(null); + + if (partitions == null) { + doAlteration(table, null); + } else { + for (Partition partition : partitions) { + doAlteration(table, partition); + } + } + + finalizeAlterTableWithWriteIdOp(table, oldTable, partitions, context, environmentContext, desc); + return 0; + } + + private List getPartitions(Table tbl, Map partSpec, DDLOperationContext context) + throws HiveException { + List partitions = null; + if (partSpec != null) { + if (DDLSemanticAnalyzer.isFullSpec(tbl, partSpec)) { + partitions = new ArrayList(); + Partition part = context.getDb().getPartition(tbl, partSpec, false); + if (part == null) { + // User provided a fully specified partition spec but it doesn't exist, fail. 
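+          // The error lists the partition columns from the spec together with the table name, so the user can see exactly what was looked up.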
+ throw new HiveException(ErrorMsg.INVALID_PARTITION, + StringUtils.join(partSpec.keySet(), ',') + " for table " + tbl.getTableName()); + + } + partitions.add(part); + } else { + // DDLSemanticAnalyzer has already checked if partial partition specs are allowed, + // thus we should not need to check it here. + partitions = context.getDb().getPartitions(tbl, partSpec); + } + } + + return partitions; + } + + private EnvironmentContext initializeEnvironmentContext(EnvironmentContext environmentContext) { + EnvironmentContext result = environmentContext == null ? new EnvironmentContext() : environmentContext; + // do not need update stats in alter table/partition operations + if (result.getProperties() == null || + result.getProperties().get(StatsSetupConst.DO_NOT_UPDATE_STATS) == null) { + result.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); + } + return result; + } + + protected abstract void doAlteration(Table table, Partition partition) throws HiveException; + + protected StorageDescriptor getStorageDescriptor(Table tbl, Partition part) { + return (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd()); + } + + public void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List partitions, + DDLOperationContext context, EnvironmentContext environmentContext, AlterTableWithWriteIdDesc alterTable) + throws HiveException { + if (partitions == null) { + updateModifiedParameters(table.getTTable().getParameters(), context.getConf()); + table.checkValidity(context.getConf()); + } else { + for (Partition partition : partitions) { + updateModifiedParameters(partition.getParameters(), context.getConf()); + } + } + + try { + environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, alterTable.getType().name()); + if (partitions == null) { + long writeId = alterTable.getWriteId() != null ? alterTable.getWriteId() : 0; + if (alterTable.getReplicationSpec() != null && alterTable.getReplicationSpec().isMigratingToTxnTable()) { + Long tmpWriteId = ReplUtils.getMigrationCurrentTblWriteId(context.getConf()); + if (tmpWriteId == null) { + throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); + } + writeId = tmpWriteId; + } + context.getDb().alterTable(alterTable.getTableName(), table, alterTable.isCascade(), environmentContext, true, + writeId); + } else { + // Note: this is necessary for UPDATE_STATISTICS command, that operates via ADDPROPS (why?). + // For any other updates, we don't want to do txn check on partitions when altering table. + boolean isTxn = false; + if (alterTable.getPartitionSpec() != null && alterTable.getType() == AlterTableTypes.ADDPROPS) { + // ADDPROPS is used to add replication properties like repl.last.id, which isn't + // transactional change. In case of replication check for transactional properties + // explicitly. 
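+          // In practice: an ADDPROPS arriving through replication is treated as transactional only if it also
+          // carries COLUMN_STATS_ACCURATE, whereas a user-issued ADDPROPS on a partition spec always is.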
+ Map props = alterTable.getProps(); + if (alterTable.getReplicationSpec() != null && alterTable.getReplicationSpec().isInReplicationScope()) { + isTxn = (props.get(StatsSetupConst.COLUMN_STATS_ACCURATE) != null); + } else { + isTxn = true; + } + } + String qualifiedName = TableName.getDbTable(table.getTTable().getDbName(), table.getTTable().getTableName()); + context.getDb().alterPartitions(qualifiedName, partitions, environmentContext, isTxn); + } + // Add constraints if necessary + if (alterTable instanceof AlterTableWithConstraintsDesc) { + AlterTableAddConstraintOperation.addConstraints((AlterTableWithConstraintsDesc)alterTable, context.getDb()); + } + } catch (InvalidOperationException e) { + LOG.error("alter table: ", e); + throw new HiveException(e, ErrorMsg.GENERIC_ERROR); + } + + // This is kind of hacky - the read entity contains the old table, whereas the write entity contains the new + // table. This is needed for rename - both the old and the new table names are passed + // Don't acquire locks for any of these, we have already asked for them in DDLSemanticAnalyzer. + if (partitions != null ) { + for (Partition partition : partitions) { + context.getWork().getInputs().add(new ReadEntity(partition)); + DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context); + } + } else { + context.getWork().getInputs().add(new ReadEntity(oldTable)); + DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context); + } + } + + private static void updateModifiedParameters(Map params, HiveConf conf) throws HiveException { + String user = SessionState.getUserFromAuthenticator(); + params.put("last_modified_by", user); + params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000)); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java new file mode 100644 index 0000000000..05ebb7222b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... ADD COLUMNS ... 
commands. + */ +@Explain(displayName = "Add Columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableAddColumnsDesc extends AlterTableWithWriteIdDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableAddColumnsDesc.class, AlterTableAddColumnsOperation.class); + } + + private final List newColumns; + + public AlterTableAddColumnsDesc(String tableName, Map partitionSpec, boolean isCascade, + List newColumns) throws SemanticException { + super(AlterTableTypes.ADDCOLS, tableName, partitionSpec, null, isCascade, false); + this.newColumns = newColumns; + } + + @Explain(displayName = "new columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List getNewColumns() { + return newColumns; + } + + @Override + public boolean mayNeedWriteId() { + return true; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsOperation.java new file mode 100644 index 0000000000..22a3d7fba0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsOperation.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; + +/** + * Operation process of adding some new columns. + */ +public class AlterTableAddColumnsOperation extends AlterTableWithWriteIdOperation { + private final AlterTableAddColumnsDesc desc; + + public AlterTableAddColumnsOperation(DDLOperationContext context, AlterTableAddColumnsDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + String serializationLib = sd.getSerdeInfo().getSerializationLib(); + AvroSerdeUtils.handleAlterTableForAvro(context.getConf(), serializationLib, table.getTTable().getParameters()); + + List oldColumns = (partition == null ? 
table.getColsForMetastore() : partition.getColsForMetastore()); + List newColumns = desc.getNewColumns(); + + if (serializationLib.equals("org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { + context.getConsole().printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); + sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); + sd.setCols(newColumns); + } else { + // make sure the columns does not already exist + for (FieldSchema newColumn : newColumns) { + for (FieldSchema oldColumn : oldColumns) { + if (oldColumn.getName().equalsIgnoreCase(newColumn.getName())) { + throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newColumn.getName()); + } + } + + oldColumns.add(newColumn); + } + sd.setCols(oldColumns); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java new file mode 100644 index 0000000000..dc1158f27b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithConstraintsDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... CHANGE COLUMN ... commands. 
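+ * Covers renaming or retyping a single column, updating its comment, and optionally repositioning it with FIRST or AFTER.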
+ */ +@Explain(displayName = "Change Column", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableChangeColumnDesc extends AlterTableWithConstraintsDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableChangeColumnDesc.class, AlterTableChangeColumnOperation.class); + } + + private final String oldColumnName; + private final String newColumnName; + private final String newColumnType; + private final String newColumnComment; + private final boolean first; + private final String afterColumn; + + public AlterTableChangeColumnDesc(String tableName, Map partitionSpec, boolean isCascade, + Constraints constraints, String oldColumnName, String newColumnName, String newColumnType, + String newColumnComment, boolean first, String afterColumn) throws SemanticException { + super(AlterTableTypes.RENAMECOLUMN, tableName, partitionSpec, null, isCascade, false, constraints); + + this.oldColumnName = oldColumnName; + this.newColumnName = newColumnName; + this.newColumnType = newColumnType; + this.newColumnComment = newColumnComment; + this.first = first; + this.afterColumn = afterColumn; + } + + public String getOldColumnName() { + return oldColumnName; + } + + public String getNewColumnName() { + return newColumnName; + } + + public String getNewColumnType() { + return newColumnType; + } + + public String getNewColumnComment() { + return newColumnComment; + } + + public boolean isFirst() { + return first; + } + + public String getAfterColumn() { + return afterColumn; + } + + @Override + public boolean mayNeedWriteId() { + return true; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnOperation.java new file mode 100644 index 0000000000..69b08c2eaf --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnOperation.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableUtils; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdOperation; +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; + +/** + * Operation process changing a column. + */ +public class AlterTableChangeColumnOperation extends AlterTableWithWriteIdOperation { + private final AlterTableChangeColumnDesc desc; + + public AlterTableChangeColumnOperation(DDLOperationContext context, AlterTableChangeColumnDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + String serializationLib = sd.getSerdeInfo().getSerializationLib(); + AvroSerdeUtils.handleAlterTableForAvro(context.getConf(), serializationLib, table.getTTable().getParameters()); + + // if orc table, restrict reordering columns as it will break schema evolution + boolean isOrcSchemaEvolution = sd.getInputFormat().equals(OrcInputFormat.class.getName()) && + AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()); + if (isOrcSchemaEvolution && (desc.isFirst() || StringUtils.isNotBlank(desc.getAfterColumn()))) { + throw new HiveException(ErrorMsg.CANNOT_REORDER_COLUMNS, desc.getTableName()); + } + + FieldSchema column = null; + boolean found = false; + int position = desc.isFirst() ? 0 : -1; + int i = 1; + + List oldColumns = (partition == null ? 
table.getColsForMetastore() : partition.getColsForMetastore()); + List newColumns = new ArrayList(); + for (FieldSchema oldColumn : oldColumns) { + String oldColumnName = oldColumn.getName(); + if (oldColumnName.equalsIgnoreCase(desc.getOldColumnName())) { + oldColumn.setName(desc.getNewColumnName()); + if (StringUtils.isNotBlank(desc.getNewColumnType())) { + oldColumn.setType(desc.getNewColumnType()); + } + if (desc.getNewColumnComment() != null) { + oldColumn.setComment(desc.getNewColumnComment()); + } + found = true; + if (desc.isFirst() || StringUtils.isNotBlank(desc.getAfterColumn())) { + column = oldColumn; + continue; + } + } else if (oldColumnName.equalsIgnoreCase(desc.getNewColumnName())) { + throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, desc.getNewColumnName()); + } + + if (oldColumnName.equalsIgnoreCase(desc.getAfterColumn())) { + position = i; + } + + i++; + newColumns.add(oldColumn); + } + + if (!found) { + throw new HiveException(ErrorMsg.INVALID_COLUMN, desc.getOldColumnName()); + } + if (StringUtils.isNotBlank(desc.getAfterColumn()) && position < 0) { + throw new HiveException(ErrorMsg.INVALID_COLUMN, desc.getAfterColumn()); + } + + if (position >= 0) { + newColumns.add(position, column); + } + + sd.setCols(newColumns); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java new file mode 100644 index 0000000000..0536ea53c9 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... REPLACE COLUMNS ... commands. 
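As an aside on the CHANGE COLUMN repositioning loop above: the changed column is held back while the remaining columns are copied, the slot directly after the AFTER column (or slot 0 for FIRST) is remembered, and the column is re-inserted there. A small standalone toy of the same computation, using plain strings instead of FieldSchema (illustrative only, not Hive code):

```java
import java.util.ArrayList;
import java.util.List;

/** Toy model of how CHANGE COLUMN ... FIRST / AFTER repositions a column (illustrative only). */
public class ColumnReorderSketch {
  static List<String> reorder(List<String> cols, String changed, boolean first, String after) {
    List<String> result = new ArrayList<>();
    String moved = null;
    int position = first ? 0 : -1;
    int i = 1;
    for (String col : cols) {
      if (col.equalsIgnoreCase(changed)) {
        moved = col;                  // the column being changed is held back ...
        if (first || after != null) {
          continue;                   // ... and re-inserted at its new slot below
        }
      }
      if (col.equalsIgnoreCase(after)) {
        position = i;                 // slot directly after the AFTER column
      }
      i++;
      result.add(col);
    }
    if (position >= 0 && moved != null) {
      result.add(position, moved);
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(reorder(List.of("a", "b", "c"), "a", false, "b")); // [b, a, c]
    System.out.println(reorder(List.of("a", "b", "c"), "c", true, null)); // [c, a, b]
  }
}
```

Running it prints [b, a, c] and [c, a, b], matching what ALTER TABLE t CHANGE a a int AFTER b and ALTER TABLE t CHANGE c c int FIRST would produce on a three-column (a, b, c) table.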
+ */ +@Explain(displayName = "Replace Columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableReplaceColumnsDesc extends AlterTableWithWriteIdDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableReplaceColumnsDesc.class, AlterTableReplaceColumnsOperation.class); + } + + private final List newColumns; + + public AlterTableReplaceColumnsDesc(String tableName, Map partitionSpec, boolean isCascade, + List newColumns) throws SemanticException { + super(AlterTableTypes.REPLACECOLS, tableName, partitionSpec, null, isCascade, false); + this.newColumns = newColumns; + } + + @Explain(displayName = "new columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List getNewColumns() { + return newColumns; + } + + @Override + public boolean mayNeedWriteId() { + return true; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java new file mode 100644 index 0000000000..a112149598 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableUtils; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdOperation; +import org.apache.hadoop.hive.ql.io.orc.OrcSerde; +import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; +import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; +import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; + +import avro.shaded.com.google.common.collect.ImmutableSet; + +/** + * Operation process of replacing two columns. 
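+ * More precisely, it swaps in the full replacement column list, subject to the SerDe and schema-evolution checks below.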
+ */ +public class AlterTableReplaceColumnsOperation extends AlterTableWithWriteIdOperation { + private final AlterTableReplaceColumnsDesc desc; + + public AlterTableReplaceColumnsOperation(DDLOperationContext context, AlterTableReplaceColumnsDesc desc) { + super(context, desc); + this.desc = desc; + } + + private static final Set VALID_SERIALIZATION_LIBS = ImmutableSet.of( + MetadataTypedColumnsetSerDe.class.getName(), LazySimpleSerDe.class.getName(), ColumnarSerDe.class.getName(), + DynamicSerDe.class.getName(), ParquetHiveSerDe.class.getName(), OrcSerde.class.getName()); + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + // change SerDe to LazySimpleSerDe if it is columnsetSerDe + String serializationLib = sd.getSerdeInfo().getSerializationLib(); + if (serializationLib.equals("org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { + context.getConsole().printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); + sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); + } else if (!VALID_SERIALIZATION_LIBS.contains(serializationLib)) { + throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, desc.getTableName()); + } + + // adding columns and limited integer type promotion is not supported for ORC schema evolution + boolean isOrcSchemaEvolution = serializationLib.equals(OrcSerde.class.getName()) && + AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()); + if (isOrcSchemaEvolution) { + List existingCols = sd.getCols(); + List replaceCols = desc.getNewColumns(); + + if (replaceCols.size() < existingCols.size()) { + throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, desc.getTableName()); + } + } + + boolean droppingColumns = desc.getNewColumns().size() < sd.getCols().size(); + if (ParquetHiveSerDe.isParquetTable(table) && AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()) && + !desc.isCascade() && droppingColumns && table.isPartitioned()) { + LOG.warn("Cannot drop columns from a partitioned parquet table without the CASCADE option"); + throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, desc.getTableName()); + } + + sd.setCols(desc.getNewColumns()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java new file mode 100644 index 0000000000..52e8c9a838 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... UPDATE COLUMNS ... commands. + */ +@Explain(displayName = "Update Columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableUpdateColumnsDesc extends AlterTableWithWriteIdDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableUpdateColumnsDesc.class, AlterTableUpdateColumnsOperation.class); + } + + public AlterTableUpdateColumnsDesc(String tableName, Map partitionSpec, boolean isCascade) throws SemanticException { + super(AlterTableTypes.UPDATECOLUMNS, tableName, partitionSpec, null, isCascade, false); + } + + @Override + public boolean mayNeedWriteId() { + return true; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsOperation.java new file mode 100644 index 0000000000..39c4baabc1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsOperation.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.util.Collection; +import java.util.List; + +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.serde2.Deserializer; + +/** + * Operation process of adding some new columns. 
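+ * More precisely, it refreshes the metastore column list from the schema reported by the table's SerDe/deserializer.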
+ */ +public class AlterTableUpdateColumnsOperation extends AlterTableWithWriteIdOperation { + public AlterTableUpdateColumnsOperation(DDLOperationContext context, AlterTableUpdateColumnsDesc desc) { + super(context, desc); + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + //StorageDescriptor sd = getStorageDescriptor(table, partition); + String serializationLib = table.getSd().getSerdeInfo().getSerializationLib(); + + Collection serdes = MetastoreConf.getStringCollection(context.getConf(), + MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA); + if (serdes.contains(serializationLib)) { + throw new HiveException(table.getTableName() + " has serde " + serializationLib + " for which schema " + + "is already handled by HMS."); + } + + Deserializer deserializer = table.getDeserializer(true); + try { + LOG.info("Updating metastore columns for table: {}", table.getTableName()); + List fields = HiveMetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), deserializer); + StorageDescriptor sd = getStorageDescriptor(table, partition); + sd.setCols(fields); + } catch (org.apache.hadoop.hive.serde2.SerDeException | MetaException e) { + LOG.error("alter table update columns: {}", e); + throw new HiveException(e, ErrorMsg.GENERIC_ERROR); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/ShowColumnsDesc.java similarity index 53% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/ShowColumnsDesc.java index 7047f56275..a289958060 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/ShowColumnsDesc.java @@ -15,109 +15,55 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table.column; import java.io.Serializable; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; -public class ShowColumnsDesc extends DDLDesc implements Serializable { +/** + * DDL task description for SHOW COLUMNS commands. + */ +@Explain(displayName = "Show Columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowColumnsDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String pattern; - String tableName; - String resFile; - /** - * table name for the result of show columns. - */ - private static final String table = "show_columns"; - /** - * thrift ddl for the result of show columns. 
- */ - private static final String schema = "Field#string"; - public String getTable() { - return table; + static { + DDLTask2.registerOperation(ShowColumnsDesc.class, ShowColumnsOperation.class); } - public String getSchema() { - return schema; - } + public static final String SCHEMA = "Field#string"; - public ShowColumnsDesc() { - } + private final String resFile; + private final String tableName; + private final String pattern; - /** - * @param resFile - */ - public ShowColumnsDesc(Path resFile) { - this.resFile = resFile.toString(); - tableName = null; - } - - /** - * @param tableName name of table to show columns of - */ public ShowColumnsDesc(Path resFile, String tableName) { - this.resFile = resFile.toString(); - this.tableName = tableName; + this(resFile, tableName, null); } - /** - * @param tableName name of table to show columns of - */ public ShowColumnsDesc(Path resFile, String tableName, String pattern) { this.resFile = resFile.toString(); this.pattern = pattern; this.tableName = tableName; } - - /** - * @return the pattern - */ @Explain(displayName = "pattern") public String getPattern() { return pattern; } - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the tableName - */ @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { return tableName; } - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return the resFile - */ @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/ShowColumnsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/ShowColumnsOperation.java new file mode 100644 index 0000000000..068863df8b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/ShowColumnsOperation.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.column; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; +import org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataTable; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * Operation process of showing the columns. + */ +public class ShowColumnsOperation extends DDLOperation { + private final ShowColumnsDesc desc; + + public ShowColumnsOperation(DDLOperationContext context, ShowColumnsDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // write the results in the file + try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + List columns = getColumnsByPattern(); + writeColumns(outStream, columns); + } catch (IOException e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR); + } + + return 0; + } + + private List getColumnsByPattern() throws HiveException { + List columns = getCols(); + Matcher matcher = getMatcher(); + return filterColumns(columns, matcher); + } + + private List getCols() throws HiveException { + Table table = context.getDb().getTable(desc.getTableName()); + List allColumns = new ArrayList<>(); + allColumns.addAll(table.getCols()); + allColumns.addAll(table.getPartCols()); + return allColumns; + } + + private Matcher getMatcher() { + String columnPattern = desc.getPattern(); + if (columnPattern == null) { + columnPattern = "*"; + } + columnPattern = columnPattern.toLowerCase(); + columnPattern = columnPattern.replaceAll("\\*", ".*"); + + Pattern pattern = Pattern.compile(columnPattern); + return pattern.matcher(""); + } + + private List filterColumns(List columns, Matcher matcher) { + ArrayList result = new ArrayList<>(); + for (FieldSchema column : columns) { + matcher.reset(column.getName()); + if (matcher.matches()) { + result.add(column); + } + } + + result.sort( + new Comparator() { + @Override + public int compare(FieldSchema f1, FieldSchema f2) { + return f1.getName().compareTo(f2.getName()); + } + }); + + return result; + } + + private void writeColumns(DataOutputStream outStream, List columns) throws IOException { + TextMetaDataTable tmd = new TextMetaDataTable(); + for (FieldSchema fieldSchema : columns) { + tmd.addRow(MetaDataFormatUtils.extractColumnValues(fieldSchema)); + } + + // In case the query is served by HiveServer2, don't pad it with spaces, + // as HiveServer2 output is consumed by JDBC/ODBC clients. 
+ boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + outStream.writeBytes(tmd.renderTable(isOutputPadded)); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/package-info.java new file mode 100644 index 0000000000..447d61b794 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Table column related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.table.column; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java new file mode 100644 index 0000000000..6569b1bc04 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.table.constaint; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithConstraintsDesc; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... ADD CONSTRAINT ... commands. 
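A note on the SHOW COLUMNS pattern handling above (getMatcher): the optional pattern is lower-cased and each '*' is expanded to '.*', then compiled as an ordinary java.util.regex pattern, so '|' alternation also works incidentally. A standalone sketch of the resulting matching behaviour (illustrative only, not part of the patch):

```java
import java.util.regex.Pattern;

/** Toy demonstration of the SHOW COLUMNS glob-to-regex conversion (illustrative only). */
public class ShowColumnsPatternSketch {
  public static void main(String[] args) {
    // Same transformation as getMatcher(): lower-case, then '*' -> '.*'
    String columnPattern = "Col*|*Id".toLowerCase().replaceAll("\\*", ".*");
    Pattern pattern = Pattern.compile(columnPattern);
    System.out.println(pattern.matcher("col_a").matches());   // true  - starts with "col"
    System.out.println(pattern.matcher("row_id").matches());  // true  - ends with "id"
    System.out.println(pattern.matcher("other").matches());   // false
  }
}
```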
+ */ +@Explain(displayName = "Add Constraint", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableAddConstraintDesc extends AlterTableWithConstraintsDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableAddConstraintDesc.class, AlterTableAddConstraintOperation.class); + } + + public AlterTableAddConstraintDesc(String tableName, ReplicationSpec replicationSpec, Constraints constraints) + throws SemanticException { + super(AlterTableTypes.ADDCONSTRAINT, tableName, null, replicationSpec, false, false, constraints); + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintOperation.java new file mode 100644 index 0000000000..80b8f7ad9f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintOperation.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.constaint; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithConstraintsDesc; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of adding a new constraint. + */ +public class AlterTableAddConstraintOperation extends DDLOperation { + private final AlterTableAddConstraintDesc desc; + + public AlterTableAddConstraintOperation(DDLOperationContext context, AlterTableAddConstraintDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws Exception { + if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), desc.getTableName(), null, + desc.getReplicationSpec())) { + // no alter, the table is missing either due to drop/rename which follows the alter. + // or the existing table is newer than our update. 
+ LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getTableName()); + return 0; + } + + addConstraints(desc, context.getDb()); + return 0; + } + + // This function is used by other operations that may modify the constraints + public static void addConstraints(AlterTableWithConstraintsDesc desc, Hive db) throws HiveException { + try { + Constraints constraints = desc.getConstraints(); + // This is either an alter table add foreign key or add primary key command. + if (CollectionUtils.isNotEmpty(constraints.getPrimaryKeys())) { + db.addPrimaryKey(constraints.getPrimaryKeys()); + } + if (CollectionUtils.isNotEmpty(constraints.getForeignKeys())) { + try { + db.addForeignKey(constraints.getForeignKeys()); + } catch (HiveException e) { + if (e.getCause() instanceof InvalidObjectException && desc.getReplicationSpec() != null && + desc.getReplicationSpec().isInReplicationScope()) { + // During repl load, NoSuchObjectException in foreign key shall + // ignore as the foreign table may not be part of the replication + LOG.debug("InvalidObjectException: ", e); + } else { + throw e; + } + } + } + if (CollectionUtils.isNotEmpty(constraints.getUniqueConstraints())) { + db.addUniqueConstraint(constraints.getUniqueConstraints()); + } + if (CollectionUtils.isNotEmpty(constraints.getNotNullConstraints())) { + db.addNotNullConstraint(constraints.getNotNullConstraints()); + } + if (CollectionUtils.isNotEmpty(constraints.getDefaultConstraints())) { + db.addDefaultConstraint(constraints.getDefaultConstraints()); + } + if (CollectionUtils.isNotEmpty(constraints.getCheckConstraints())) { + db.addCheckConstraint(constraints.getCheckConstraints()); + } + } catch (NoSuchObjectException e) { + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintDesc.java new file mode 100644 index 0000000000..dddf7a4721 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintDesc.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.ddl.table.constaint; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... DROP CONSTRAINT ... commands. 
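+ * The constraint to drop is identified by name within the given table; no constraint definition is required.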
+ */ +@Explain(displayName = "Drop Constraint", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableDropConstraintDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableDropConstraintDesc.class, AlterTableDropConstraintOperation.class); + } + + private final String tableName; + private final ReplicationSpec replicationSpec; + private final String constraintName; + + public AlterTableDropConstraintDesc(String tableName, ReplicationSpec replicationSpec, String constraintName) + throws SemanticException { + this.tableName = String.join(".", Utilities.getDbTableName(tableName)); + this.replicationSpec = replicationSpec; + this.constraintName = constraintName; + } + + public String getTableName() { + return tableName; + } + + public ReplicationSpec getReplicationSpec() { + return replicationSpec; + } + + public String getConstraintName() { + return constraintName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintOperation.java new file mode 100644 index 0000000000..84c750ee5a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintOperation.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.constaint; + +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of dropping a new constraint. + */ +public class AlterTableDropConstraintOperation extends DDLOperation { + private final AlterTableDropConstraintDesc desc; + + public AlterTableDropConstraintOperation(DDLOperationContext context, AlterTableDropConstraintDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws Exception { + if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), desc.getTableName(), null, + desc.getReplicationSpec())) { + // no alter, the table is missing either due to drop/rename which follows the alter. + // or the existing table is newer than our update. 
+ LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getTableName()); + return 0; + } + + try { + context.getDb().dropConstraint(Utilities.getDatabaseName(desc.getTableName()), + Utilities.getTableName(desc.getTableName()), desc.getConstraintName()); + } catch (NoSuchObjectException e) { + throw new HiveException(e); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/Constraints.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/Constraints.java new file mode 100644 index 0000000000..fa2cde4ec1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/Constraints.java @@ -0,0 +1,57 @@ +package org.apache.hadoop.hive.ql.ddl.table.constaint; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; + +public class Constraints implements Serializable { + private static final long serialVersionUID = 1L; + + private final List primaryKeys; + private final List foreignKeys; + private final List notNullConstraints; + private final List uniqueConstraints; + private final List defaultConstraints; + private final List checkConstraints; + + public Constraints(List primaryKeys, List foreignKeys, + List notNullConstraints, List uniqueConstraints, + List defaultConstraints, List checkConstraints) { + this.primaryKeys = primaryKeys; + this.foreignKeys = foreignKeys; + this.notNullConstraints = notNullConstraints; + this.uniqueConstraints = uniqueConstraints; + this.defaultConstraints = defaultConstraints; + this.checkConstraints = checkConstraints; + } + + public List getPrimaryKeys() { + return primaryKeys; + } + + public List getForeignKeys() { + return foreignKeys; + } + + public List getNotNullConstraints() { + return notNullConstraints; + } + + public List getUniqueConstraints() { + return uniqueConstraints; + } + + public List getDefaultConstraints() { + return defaultConstraints; + } + + public List getCheckConstraints() { + return checkConstraints; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/package-info.java new file mode 100644 index 0000000000..fc662b3737 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** Table constraint related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.table.constaint; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java index cdd1777767..17d97bad5b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** * DDL task description for DESC table_name commands. */ diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 0c531bed51..2e955aef9d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -24,16 +24,11 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Comparator; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import com.google.common.collect.Lists; import org.apache.commons.lang.StringUtils; @@ -53,17 +48,13 @@ import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.DriverContext; @@ -80,14 +71,11 @@ import org.apache.hadoop.hive.ql.io.merge.MergeFileWork; import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcSerde; -import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.PartitionIterable; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; -import org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataTable; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; @@ -109,19 +97,12 @@ import org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc; import org.apache.hadoop.hive.ql.plan.RCFileMergeDesc; import org.apache.hadoop.hive.ql.plan.ReplRemoveFirstIncLoadPendFlagDesc; -import 
org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.Deserializer; -import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; -import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; -import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; -import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.tools.HadoopArchives; import org.apache.hadoop.util.ToolRunner; @@ -190,13 +171,7 @@ public int execute(DriverContext driverContext) { LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName()); return 0; } - if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT ) { - return dropConstraint(db, alterTbl); - } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) { - return addConstraints(db, alterTbl); - } else { - return alterTable(db, alterTbl); - } + return alterTable(db, alterTbl); } AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc(); @@ -217,11 +192,6 @@ public int execute(DriverContext driverContext) { return msck(db, msckDesc); } - ShowColumnsDesc showCols = work.getShowColumnsDesc(); - if (showCols != null) { - return showColumns(db, showCols); - } - ShowConfDesc showConf = work.getShowConfDesc(); if (showConf != null) { return showConf(db, showConf); @@ -313,16 +283,6 @@ private int showConf(Hive db, ShowConfDesc showConf) throws Exception { return 0; } - private DataOutputStream getOutputStream(String resFile) throws HiveException { - try { - return getOutputStream(new Path(resFile)); - } catch (HiveException e) { - throw e; - } catch (Exception e) { - throw new HiveException(e); - } - } - private DataOutputStream getOutputStream(Path outputFile) throws HiveException { try { FileSystem fs = outputFile.getFileSystem(conf); @@ -1127,80 +1087,6 @@ private int msck(Hive db, MsckDesc msckDesc) { } } - /** - * Write a list of the columns in the table to a file. - * - * @param db - * The database in context. - * @param showCols - * A ShowColumnsDesc for columns we're interested in. - * @return Returns 0 when execution succeeds. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - public int showColumns(Hive db, ShowColumnsDesc showCols) - throws HiveException { - - Table table = db.getTable(showCols.getTableName()); - - // write the results in the file - DataOutputStream outStream = getOutputStream(showCols.getResFile()); - try { - List allCols = table.getCols(); - allCols.addAll(table.getPartCols()); - List cols = getColumnsByPattern(allCols,showCols.getPattern()); - // In case the query is served by HiveServer2, don't pad it with spaces, - // as HiveServer2 output is consumed by JDBC/ODBC clients. 
- boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); - TextMetaDataTable tmd = new TextMetaDataTable(); - for (FieldSchema fieldSchema : cols) { - tmd.addRow(MetaDataFormatUtils.extractColumnValues(fieldSchema)); - } - outStream.writeBytes(tmd.renderTable(isOutputPadded)); - } catch (IOException e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - - /** - * Returns a sorted list of columns matching a column pattern. - * - * @param cols - * Columns of a table. - * @param columnPattern - * we want to find columns similar to a column pattern. - * @return sorted list of columns. - */ - private List getColumnsByPattern(List cols, String columnPattern) { - - if(columnPattern == null) { - columnPattern = "*"; - } - columnPattern = columnPattern.toLowerCase(); - columnPattern = columnPattern.replaceAll("\\*", ".*"); - Pattern pattern = Pattern.compile(columnPattern); - Matcher matcher = pattern.matcher(""); - - SortedSet sortedCol = new TreeSet<>( new Comparator() { - @Override - public int compare(FieldSchema f1, FieldSchema f2) { - return f1.getName().compareTo(f2.getName()); - } - }); - - for(FieldSchema column : cols) { - matcher.reset(column.getName()); - if(matcher.matches()) { - sortedCol.add(column); - } - } - - return new ArrayList(sortedCol); - } - /** * Alter a given table. * @@ -1303,8 +1189,6 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { } db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext, isTxn); } - // Add constraints if necessary - addConstraints(db, alterTbl); } catch (InvalidOperationException e) { LOG.error("alter table: ", e); throw new HiveException(e, ErrorMsg.GENERIC_ERROR); @@ -1398,152 +1282,6 @@ private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName())); tbl.setTableName(Utilities.getTableName(alterTbl.getNewName())); - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - String serializationLib = sd.getSerdeInfo().getSerializationLib(); - AvroSerdeUtils.handleAlterTableForAvro(conf, serializationLib, tbl.getTTable().getParameters()); - List oldCols = (part == null - ? 
tbl.getColsForMetastore() : part.getColsForMetastore()); - List newCols = alterTbl.getNewCols(); - if (serializationLib.equals( - "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { - console - .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setCols(newCols); - } else { - // make sure the columns does not already exist - Iterator iterNewCols = newCols.iterator(); - while (iterNewCols.hasNext()) { - FieldSchema newCol = iterNewCols.next(); - String newColName = newCol.getName(); - Iterator iterOldCols = oldCols.iterator(); - while (iterOldCols.hasNext()) { - String oldColName = iterOldCols.next().getName(); - if (oldColName.equalsIgnoreCase(newColName)) { - throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newColName); - } - } - oldCols.add(newCol); - } - sd.setCols(oldCols); - } - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - String serializationLib = sd.getSerdeInfo().getSerializationLib(); - AvroSerdeUtils.handleAlterTableForAvro(conf, serializationLib, tbl.getTTable().getParameters()); - List oldCols = (part == null - ? tbl.getColsForMetastore() : part.getColsForMetastore()); - List newCols = new ArrayList(); - Iterator iterOldCols = oldCols.iterator(); - String oldName = alterTbl.getOldColName(); - String newName = alterTbl.getNewColName(); - String type = alterTbl.getNewColType(); - String comment = alterTbl.getNewColComment(); - boolean first = alterTbl.getFirst(); - String afterCol = alterTbl.getAfterCol(); - // if orc table, restrict reordering columns as it will break schema evolution - boolean isOrcSchemaEvolution = - sd.getInputFormat().equals(OrcInputFormat.class.getName()) && - isSchemaEvolutionEnabled(tbl); - if (isOrcSchemaEvolution && (first || (afterCol != null && !afterCol.trim().isEmpty()))) { - throw new HiveException(ErrorMsg.CANNOT_REORDER_COLUMNS, alterTbl.getOldName()); - } - FieldSchema column = null; - - boolean found = false; - int position = -1; - if (first) { - position = 0; - } - - int i = 1; - while (iterOldCols.hasNext()) { - FieldSchema col = iterOldCols.next(); - String oldColName = col.getName(); - if (oldColName.equalsIgnoreCase(newName) - && !oldColName.equalsIgnoreCase(oldName)) { - throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newName); - } else if (oldColName.equalsIgnoreCase(oldName)) { - col.setName(newName); - if (type != null && !type.trim().equals("")) { - col.setType(type); - } - if (comment != null) { - col.setComment(comment); - } - found = true; - if (first || (afterCol != null && !afterCol.trim().equals(""))) { - column = col; - continue; - } - } - - if (afterCol != null && !afterCol.trim().equals("") - && oldColName.equalsIgnoreCase(afterCol)) { - position = i; - } - - i++; - newCols.add(col); - } - - // did not find the column - if (!found) { - throw new HiveException(ErrorMsg.INVALID_COLUMN, oldName); - } - // after column is not null, but we did not find it. 
- if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) { - throw new HiveException(ErrorMsg.INVALID_COLUMN, afterCol); - } - - if (position >= 0) { - newCols.add(position, column); - } - - sd.setCols(newCols); - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - // change SerDe to LazySimpleSerDe if it is columnsetSerDe - String serializationLib = sd.getSerdeInfo().getSerializationLib(); - if (serializationLib.equals( - "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { - console - .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - } else if (!serializationLib.equals( - MetadataTypedColumnsetSerDe.class.getName()) - && !serializationLib.equals(LazySimpleSerDe.class.getName()) - && !serializationLib.equals(ColumnarSerDe.class.getName()) - && !serializationLib.equals(DynamicSerDe.class.getName()) - && !serializationLib.equals(ParquetHiveSerDe.class.getName()) - && !serializationLib.equals(OrcSerde.class.getName())) { - throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, alterTbl.getOldName()); - } - final boolean isOrcSchemaEvolution = - serializationLib.equals(OrcSerde.class.getName()) && - isSchemaEvolutionEnabled(tbl); - // adding columns and limited integer type promotion is supported for ORC schema evolution - if (isOrcSchemaEvolution) { - final List existingCols = sd.getCols(); - final List replaceCols = alterTbl.getNewCols(); - - if (replaceCols.size() < existingCols.size()) { - throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, alterTbl.getOldName()); - } - } - - boolean partitioned = tbl.isPartitioned(); - boolean droppingColumns = alterTbl.getNewCols().size() < sd.getCols().size(); - if (ParquetHiveSerDe.isParquetTable(tbl) && - isSchemaEvolutionEnabled(tbl) && - !alterTbl.getIsCascade() && - droppingColumns && partitioned) { - LOG.warn("Cannot drop columns from a partitioned parquet table without the CASCADE option"); - throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, - alterTbl.getOldName()); - } - sd.setCols(alterTbl.getNewCols()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) { return alterTableAddProps(alterTbl, tbl, part, environmentContext); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) { @@ -1700,8 +1438,6 @@ private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition } tbl.setNumBuckets(alterTbl.getNumberBuckets()); } - } else if (alterTbl.getOp() == AlterTableTypes.UPDATECOLUMNS) { - updateColumns(tbl, part); } else { throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString()); } @@ -1851,84 +1587,6 @@ private void checkMmLb(Partition part) throws HiveException { return result; } - private int dropConstraint(Hive db, AlterTableDesc alterTbl) - throws SemanticException, HiveException { - try { - db.dropConstraint(Utilities.getDatabaseName(alterTbl.getOldName()), - Utilities.getTableName(alterTbl.getOldName()), - alterTbl.getConstraintName()); - } catch (NoSuchObjectException e) { - throw new HiveException(e); - } - return 0; - } - - private int addConstraints(Hive db, AlterTableDesc alterTbl) - throws SemanticException, HiveException { - try { - // This is either an alter table add foreign key or add primary key command. 
- if (alterTbl.getPrimaryKeyCols() != null && !alterTbl.getPrimaryKeyCols().isEmpty()) { - db.addPrimaryKey(alterTbl.getPrimaryKeyCols()); - } - if (alterTbl.getForeignKeyCols() != null && !alterTbl.getForeignKeyCols().isEmpty()) { - try { - db.addForeignKey(alterTbl.getForeignKeyCols()); - } catch (HiveException e) { - if (e.getCause() instanceof InvalidObjectException - && alterTbl.getReplicationSpec()!= null && alterTbl.getReplicationSpec().isInReplicationScope()) { - // During repl load, NoSuchObjectException in foreign key shall - // ignore as the foreign table may not be part of the replication - LOG.debug("InvalidObjectException: ", e); - } else { - throw e; - } - } - } - if (alterTbl.getUniqueConstraintCols() != null - && !alterTbl.getUniqueConstraintCols().isEmpty()) { - db.addUniqueConstraint(alterTbl.getUniqueConstraintCols()); - } - if (alterTbl.getNotNullConstraintCols() != null - && !alterTbl.getNotNullConstraintCols().isEmpty()) { - db.addNotNullConstraint(alterTbl.getNotNullConstraintCols()); - } - if (alterTbl.getDefaultConstraintCols() != null - && !alterTbl.getDefaultConstraintCols().isEmpty()) { - db.addDefaultConstraint(alterTbl.getDefaultConstraintCols()); - } - if (alterTbl.getCheckConstraintCols() != null - && !alterTbl.getCheckConstraintCols().isEmpty()) { - db.addCheckConstraint(alterTbl.getCheckConstraintCols()); - } - } catch (NoSuchObjectException e) { - throw new HiveException(e); - } - return 0; - } - - private int updateColumns(Table tbl, Partition part) - throws HiveException { - String serializationLib = tbl.getSd().getSerdeInfo().getSerializationLib(); - if (MetastoreConf.getStringCollection(conf, - MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA).contains(serializationLib)) { - throw new HiveException(tbl.getTableName() + " has serde " + serializationLib + " for which schema " + - "is already handled by HMS."); - } - Deserializer deserializer = tbl.getDeserializer(true); - try { - LOG.info("Updating metastore columns for table: {}", tbl.getTableName()); - final List fields = HiveMetaStoreUtils.getFieldsFromDeserializer( - tbl.getTableName(), deserializer); - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - sd.setCols(fields); - } catch (org.apache.hadoop.hive.serde2.SerDeException | MetaException e) { - LOG.error("alter table update columns: {}", e); - throw new HiveException(e, ErrorMsg.GENERIC_ERROR); - } - - return 0; - } - /** * Update last_modified_by and last_modified_time parameters in parameter map. 
* diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 99d7f21228..aedd10051d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -91,6 +91,15 @@ import org.apache.hadoop.hive.ql.ddl.process.KillQueriesDesc; import org.apache.hadoop.hive.ql.ddl.process.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.ddl.process.ShowTransactionsDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableWithWriteIdDesc; +import org.apache.hadoop.hive.ql.ddl.table.column.AlterTableAddColumnsDesc; +import org.apache.hadoop.hive.ql.ddl.table.column.AlterTableChangeColumnDesc; +import org.apache.hadoop.hive.ql.ddl.table.column.AlterTableReplaceColumnsDesc; +import org.apache.hadoop.hive.ql.ddl.table.column.AlterTableUpdateColumnsDesc; +import org.apache.hadoop.hive.ql.ddl.table.column.ShowColumnsDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableAddConstraintDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableDropConstraintDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc; import org.apache.hadoop.hive.ql.ddl.table.creation.ShowCreateTableDesc; import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; @@ -171,7 +180,6 @@ import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -314,9 +322,9 @@ public void analyzeInternal(ASTNode input) throws SemanticException { } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) { analyzeAlterTableArchive(qualified, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) { - analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.ADDCOLS); + analyzeAlterTableAddCols(qualified, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) { - analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.REPLACECOLS); + analyzeAlterTableReplaceCols(qualified, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) { analyzeAlterTableRenameCol(catName, qualified, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) { @@ -1797,11 +1805,11 @@ else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ rootTasks.add(TaskFactory.get(ddlWork)); } - private void setAcidDdlDesc(DDLDescWithWriteId alterTblDesc) { + private void setAcidDdlDesc(DDLDescWithWriteId descWithWriteId) { if(this.ddlDescWithWriteId != null) { throw new IllegalStateException("ddlDescWithWriteId is already set: " + this.ddlDescWithWriteId); } - this.ddlDescWithWriteId = alterTblDesc; + this.ddlDescWithWriteId = descWithWriteId; } @Override @@ -1876,9 +1884,23 @@ private WriteType determineAlterTableWriteType(Table tab, AlterTableDesc desc, A } return WriteEntity.determineAlterTableWriteType(op); } + + // For the time while all the alter table operations are getting migrated there is a duplication of this method here + private WriteType determineAlterTableWriteType(Table tab, AlterTableWithWriteIdDesc desc, AlterTableTypes op) { + boolean 
convertingToAcid = false; + if(desc != null && desc.getProps() != null && Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) { + convertingToAcid = true; + } + if(!AcidUtils.isTransactionalTable(tab) && convertingToAcid) { + //non-acid to transactional conversion (property itself) must be mutexed to prevent concurrent writes. + // See HIVE-16688 for use cases. + return WriteType.DDL_EXCLUSIVE; + } + return WriteEntity.determineAlterTableWriteType(op); + } private void addInputsOutputsAlterTable(String tableName, Map partSpec, AlterTableTypes op) throws SemanticException { - addInputsOutputsAlterTable(tableName, partSpec, null, op, false); + addInputsOutputsAlterTable(tableName, partSpec, (AlterTableDesc)null, op, false); } private void addInputsOutputsAlterTable(String tableName, Map partSpec, @@ -1968,6 +1990,84 @@ private void addInputsOutputsAlterTable(String tableName, Map pa } } + // For the time while all the alter table operations are getting migrated there is a duplication of this method here + private void addInputsOutputsAlterTable(String tableName, Map partSpec, + AlterTableWithWriteIdDesc desc, AlterTableTypes op, boolean doForceExclusive) throws SemanticException { + boolean isCascade = desc != null && desc.isCascade(); + boolean alterPartitions = partSpec != null && !partSpec.isEmpty(); + //cascade only occurs at table level then cascade to partition level + if (isCascade && alterPartitions) { + throw new SemanticException( + ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName()); + } + + Table tab = getTable(tableName, true); + // cascade only occurs with partitioned table + if (isCascade && !tab.isPartitioned()) { + throw new SemanticException( + ErrorMsg.ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED); + } + + // Determine the lock type to acquire + WriteEntity.WriteType writeType = doForceExclusive + ? WriteType.DDL_EXCLUSIVE : determineAlterTableWriteType(tab, desc, op); + + if (!alterPartitions) { + inputs.add(new ReadEntity(tab)); + alterTableOutput = new WriteEntity(tab, writeType); + outputs.add(alterTableOutput); + //do not need the lock for partitions since they are covered by the table lock + if (isCascade) { + for (Partition part : getPartitions(tab, partSpec, false)) { + outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); + } + } + } else { + ReadEntity re = new ReadEntity(tab); + // In the case of altering a table for its partitions we don't need to lock the table + // itself, just the partitions. But the table will have a ReadEntity. So mark that + // ReadEntity as no lock. + re.noLockNeeded(); + inputs.add(re); + + if (isFullSpec(tab, partSpec)) { + // Fully specified partition spec + Partition part = getPartition(tab, partSpec, true); + outputs.add(new WriteEntity(part, writeType)); + } else { + // Partial partition spec supplied. Make sure this is allowed. 
+ if (!AlterTableDesc.doesAlterTableTypeSupportPartialPartitionSpec(op)) { + throw new SemanticException( + ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName()); + } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) { + throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED); + } + + for (Partition part : getPartitions(tab, partSpec, true)) { + outputs.add(new WriteEntity(part, writeType)); + } + } + } + + if (desc != null) { + validateAlterTableType(tab, op, desc.expectView()); + + // validate Unset Non Existed Table Properties +/* if (op == AlterTableDesc.AlterTableTypes.DROPPROPS && !desc.getIsDropIfExists()) { + Map tableParams = tab.getTTable().getParameters(); + for (String currKey : desc.getProps().keySet()) { + if (!tableParams.containsKey(currKey)) { + String errorMsg = + "The following property " + currKey + + " does not exist in " + tab.getTableName(); + throw new SemanticException( + ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg)); + } + } + }*/ + } + } + private void analyzeAlterTableOwner(ASTNode ast, String tableName) throws SemanticException { PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) ast.getChild(0)); @@ -2231,11 +2331,10 @@ private void analyzeAlterTableCompact(ASTNode ast, String tableName, private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName) throws SemanticException { - String dropConstraintName = unescapeIdentifier(ast.getChild(0).getText()); - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, dropConstraintName, (ReplicationSpec)null); + String constraintName = unescapeIdentifier(ast.getChild(0).getText()); + AlterTableDropConstraintDesc alterTblDesc = new AlterTableDropConstraintDesc(tableName, null, constraintName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) @@ -2273,11 +2372,12 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg( child.getToken().getText())); } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, primaryKeys, foreignKeys, - uniqueConstraints, null, null, checkConstraints, null); + + Constraints constraints = new Constraints(primaryKeys, foreignKeys, null, uniqueConstraints, null, + checkConstraints); + AlterTableAddConstraintDesc alterTblDesc = new AlterTableAddConstraintDesc(tableName, null, constraints); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private void analyzeAlterTableUpdateColumns(ASTNode ast, String tableName, @@ -2288,17 +2388,13 @@ private void analyzeAlterTableUpdateColumns(ASTNode ast, String tableName, isCascade = true; } - AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.UPDATECOLUMNS); - alterTblDesc.setOldName(tableName); - alterTblDesc.setIsCascade(isCascade); - alterTblDesc.setPartSpec(partSpec); + AlterTableUpdateColumnsDesc alterTblDesc = new AlterTableUpdateColumnsDesc(tableName, partSpec, isCascade); Table tbl = getTable(tableName); if (AcidUtils.isTransactionalTable(tbl)) { setAcidDdlDesc(alterTblDesc); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc), conf)); + 
rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc), conf)); } static HashMap getProps(ASTNode prop) { @@ -2729,9 +2825,8 @@ private void analyzeShowColumns(ASTNode ast) throws SemanticException { Table tab = getTable(tableName); inputs.add(new ReadEntity(tab)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showColumnsDesc))); - setFetchTask(createFetchTask(showColumnsDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showColumnsDesc))); + setFetchTask(createFetchTask(ShowColumnsDesc.SCHEMA)); } private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { @@ -3279,28 +3374,18 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN } String tblName = getDotName(qualified); - AlterTableDesc alterTblDesc; - if (primaryKeys == null && foreignKeys == null - && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null - && checkConstraints == null) { - alterTblDesc = new AlterTableDesc(tblName, partSpec, - unescapeIdentifier(oldColName), unescapeIdentifier(newColName), - newType, newComment, first, flagCol, isCascade); - } else { - alterTblDesc = new AlterTableDesc(tblName, partSpec, - unescapeIdentifier(oldColName), unescapeIdentifier(newColName), - newType, newComment, first, flagCol, isCascade, - primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); - } - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); + Constraints constraints = new Constraints(primaryKeys, foreignKeys, notNullConstraints, uniqueConstraints, + defaultConstraints, checkConstraints); + AlterTableChangeColumnDesc alterTblDesc = new AlterTableChangeColumnDesc(tblName, partSpec, isCascade, constraints, + unescapeIdentifier(oldColName), unescapeIdentifier(newColName), newType, newComment, first, flagCol); + addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, alterTblDesc.getType(), false); if (AcidUtils.isTransactionalTable(tab)) { // Note: we might actually need it only when certain changes (e.g. name or type?) are made. 
setAcidDdlDesc(alterTblDesc); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, @@ -3344,8 +3429,8 @@ private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, alterBucketNum))); } - private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast, - HashMap partSpec, AlterTableTypes alterType) throws SemanticException { + private void analyzeAlterTableAddCols(String[] qualified, ASTNode ast, Map partSpec) + throws SemanticException { String tblName = getDotName(qualified); List newCols = getColumns((ASTNode) ast.getChild(0)); @@ -3354,16 +3439,34 @@ private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast, isCascade = true; } - AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, newCols, - alterType, isCascade); + AlterTableAddColumnsDesc desc = new AlterTableAddColumnsDesc(tblName, partSpec, isCascade, newCols); + Table table = getTable(tblName, true); + if (AcidUtils.isTransactionalTable(table)) { + setAcidDdlDesc(desc); + } + + addInputsOutputsAlterTable(tblName, partSpec, desc, desc.getType(), false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), desc))); + } + + private void analyzeAlterTableReplaceCols(String[] qualified, ASTNode ast, HashMap partSpec) + throws SemanticException { + + String tblName = getDotName(qualified); + List newCols = getColumns((ASTNode) ast.getChild(0)); + boolean isCascade = false; + if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) { + isCascade = true; + } + + AlterTableReplaceColumnsDesc alterTblDesc = new AlterTableReplaceColumnsDesc(tblName, partSpec, isCascade, newCols); Table table = getTable(tblName, true); if (AcidUtils.isTransactionalTable(table)) { setAcidDdlDesc(alterTblDesc); } - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, alterTblDesc.getType(), false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean expectView) diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java index bba769244b..b2e90fe752 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java @@ -23,14 +23,13 @@ import java.util.List; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableAddConstraintDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; public class 
AddForeignKeyHandler extends AbstractMessageHandler { @Override @@ -67,13 +66,14 @@ fk.setFktable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList(), fks, - new ArrayList(), context.eventOnlyReplicationSpec()); - Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + Constraints constraints = new Constraints(null, fks, null, null, null, null); + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." + actualTblName, + context.eventOnlyReplicationSpec(), constraints); + Task addConstraintsTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); - return Collections.singletonList(addConstraintsTask); + return Collections.singletonList(addConstraintsTask); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java index 90d9008a31..4273e445fb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java @@ -22,18 +22,14 @@ import java.util.Collections; import java.util.List; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableAddConstraintDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; public class AddNotNullConstraintHandler extends AbstractMessageHandler { @Override @@ -65,18 +61,14 @@ nn.setTable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, - new ArrayList(), - new ArrayList(), - new ArrayList(), - nns, new ArrayList(), - new ArrayList(), - context.eventOnlyReplicationSpec()); - Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + Constraints constraints = new Constraints(null, null, nns, null, null, null); + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." 
+ actualTblName, + context.eventOnlyReplicationSpec(), constraints); + Task addConstraintsTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); - return Collections.singletonList(addConstraintsTask); + return Collections.singletonList(addConstraintsTask); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java index e8966ad7c4..6cb4722dd4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java @@ -22,15 +22,14 @@ import java.util.Collections; import java.util.List; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableAddConstraintDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; public class AddPrimaryKeyHandler extends AbstractMessageHandler { @Override @@ -62,13 +61,14 @@ pk.setTable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, pks, new ArrayList(), - new ArrayList(), context.eventOnlyReplicationSpec()); - Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + Constraints constraints = new Constraints(pks, null, null, null, null, null); + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." 
+ actualTblName, + context.eventOnlyReplicationSpec(), constraints); + Task addConstraintsTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); - return Collections.singletonList(addConstraintsTask); + return Collections.singletonList(addConstraintsTask); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java index 81f1c5ab20..9b010d7c74 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java @@ -22,15 +22,14 @@ import java.util.Collections; import java.util.List; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableAddConstraintDesc; +import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; public class AddUniqueConstraintHandler extends AbstractMessageHandler { @Override @@ -62,13 +61,14 @@ uk.setTable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList(), new ArrayList(), - uks, context.eventOnlyReplicationSpec()); - Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + Constraints constraints = new Constraints(null, null, null, uks, null, null); + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." 
+ actualTblName, + context.eventOnlyReplicationSpec(), constraints); + Task addConstraintsTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); - return Collections.singletonList(addConstraintsTask); + return Collections.singletonList(addConstraintsTask); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java index 5f9f879f6f..def207eec0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message; import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableDropConstraintDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import java.io.Serializable; import java.util.Collections; @@ -37,12 +37,12 @@ String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName; String constraintName = msg.getConstraint(); - AlterTableDesc dropConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, constraintName, - context.eventOnlyReplicationSpec()); - Task dropConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc), context.hiveConf); + AlterTableDropConstraintDesc dropConstraintsDesc = new AlterTableDropConstraintDesc( + actualDbName + "." 
+ actualTblName, context.eventOnlyReplicationSpec(), constraintName); + Task dropConstraintsTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, dropConstraintsDesc), context.hiveConf); context.log.debug("Added drop constrain task : {}:{}", dropConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); - return Collections.singletonList(dropConstraintsTask); + return Collections.singletonList(dropConstraintsTask); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 8603521041..46b428e41e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -23,12 +23,6 @@ import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Table; @@ -117,8 +111,6 @@ String newColName; String newColType; String newColComment; - boolean first; - String afterCol; boolean expectView; HashMap partSpec; private String newLocation; @@ -134,13 +126,6 @@ boolean isTurnOffSorting = false; boolean isCascade = false; EnvironmentContext environmentContext; - String dropConstraintName; - List primaryKeyCols; - List foreignKeyCols; - List uniqueConstraintCols; - List notNullConstraintCols; - List defaultConstraintsCols; - List checkConstraintsCols; ReplicationSpec replicationSpec; private Long writeId = null; PrincipalDesc ownerPrincipal; @@ -149,58 +134,6 @@ public AlterTableDesc() { } - /** - * @param tblName - * table name - * @param oldColName - * old column name - * @param newColName - * new column name - * @param newComment - * @param newType - * @throws SemanticException - */ - public AlterTableDesc(String tblName, HashMap partSpec, - String oldColName, String newColName, String newType, String newComment, - boolean first, String afterCol, boolean isCascade) throws SemanticException { - super(); - setOldName(tblName); - this.partSpec = partSpec; - this.oldColName = oldColName; - this.newColName = newColName; - newColType = newType; - newColComment = newComment; - this.first = first; - this.afterCol = afterCol; - op = AlterTableTypes.RENAMECOLUMN; - this.isCascade = isCascade; - } - - public AlterTableDesc(String tblName, HashMap partSpec, - String oldColName, String newColName, String newType, String newComment, - boolean first, String afterCol, boolean isCascade, List primaryKeyCols, - List foreignKeyCols, List uniqueConstraintCols, - List notNullConstraintCols, List defaultConstraints, - List checkConstraints) throws SemanticException { - super(); - setOldName(tblName); - this.partSpec = partSpec; - this.oldColName = oldColName; - this.newColName = newColName; - newColType = newType; - newColComment = newComment; - this.first = first; - this.afterCol = afterCol; - op = AlterTableTypes.RENAMECOLUMN; - 
this.isCascade = isCascade; - this.primaryKeyCols = primaryKeyCols; - this.foreignKeyCols = foreignKeyCols; - this.uniqueConstraintCols = uniqueConstraintCols; - this.notNullConstraintCols = notNullConstraintCols; - this.defaultConstraintsCols = defaultConstraints; - this.checkConstraintsCols = checkConstraints; - } - /** * @param oldName * old name of the table @@ -341,39 +274,6 @@ public AlterTableDesc(String tableName, HashMap partSpec, int nu this.numberBuckets = numBuckets; } - public AlterTableDesc(String tableName, String dropConstraintName, ReplicationSpec replicationSpec) throws SemanticException { - setOldName(tableName); - this.dropConstraintName = dropConstraintName; - this.replicationSpec = replicationSpec; - op = AlterTableTypes.DROPCONSTRAINT; - } - - public AlterTableDesc(String tableName, List primaryKeyCols, - List foreignKeyCols, List uniqueConstraintCols, - ReplicationSpec replicationSpec) throws SemanticException { - setOldName(tableName); - this.primaryKeyCols = primaryKeyCols; - this.foreignKeyCols = foreignKeyCols; - this.uniqueConstraintCols = uniqueConstraintCols; - this.replicationSpec = replicationSpec; - op = AlterTableTypes.ADDCONSTRAINT; - } - - public AlterTableDesc(String tableName, List primaryKeyCols, - List foreignKeyCols, List uniqueConstraintCols, - List notNullConstraintCols, List defaultConstraints, - List checkConstraints, ReplicationSpec replicationSpec) throws SemanticException { - setOldName(tableName); - this.primaryKeyCols = primaryKeyCols; - this.foreignKeyCols = foreignKeyCols; - this.uniqueConstraintCols = uniqueConstraintCols; - this.notNullConstraintCols = notNullConstraintCols; - this.defaultConstraintsCols = defaultConstraints; - this.checkConstraintsCols = checkConstraints; - this.replicationSpec = replicationSpec; - op = AlterTableTypes.ADDCONSTRAINT; - } - public AlterTableDesc(String tableName, PrincipalDesc ownerPrincipal) { op = AlterTableTypes.OWNER; this.oldName = tableName; @@ -537,78 +437,6 @@ public String getStorageHandler() { return storageHandler; } - /** - * @param primaryKeyCols - * the primary key cols to set - */ - public void setPrimaryKeyCols(List primaryKeyCols) { - this.primaryKeyCols = primaryKeyCols; - } - - /** - * @return the primary key cols - */ - public List getPrimaryKeyCols() { - return primaryKeyCols; - } - - /** - * @param foreignKeyCols - * the foreign key cols to set - */ - public void setForeignKeyCols(List foreignKeyCols) { - this.foreignKeyCols = foreignKeyCols; - } - - /** - * @return the foreign key cols - */ - public List getForeignKeyCols() { - return foreignKeyCols; - } - - /** - * @return the unique constraint cols - */ - public List getUniqueConstraintCols() { - return uniqueConstraintCols; - } - - /** - * @return the not null constraint cols - */ - public List getNotNullConstraintCols() { - return notNullConstraintCols; - } - - /** - * @return the default constraint cols - */ - public List getDefaultConstraintCols() { - return defaultConstraintsCols; - } - - /** - * @return the check constraint cols - */ - public List getCheckConstraintCols() { return checkConstraintsCols; } - - /** - * @return the drop constraint name of the table - */ - @Explain(displayName = "drop constraint name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getConstraintName() { - return dropConstraintName; - } - - /** - * @param constraintName - * the dropConstraintName to set - */ - public void setDropConstraintName(String constraintName) { - this.dropConstraintName = constraintName; - 
} - /** * @param storageHandler * the storage handler to set @@ -722,36 +550,6 @@ public void setNewColComment(String newComment) { newColComment = newComment; } - /** - * @return if the column should be changed to position 0 - */ - public boolean getFirst() { - return first; - } - - /** - * @param first - * set the column to position 0 - */ - public void setFirst(boolean first) { - this.first = first; - } - - /** - * @return the column's after position - */ - public String getAfterCol() { - return afterCol; - } - - /** - * @param afterCol - * set the column's after position - */ - public void setAfterCol(String afterCol) { - this.afterCol = afterCol; - } - /** * @return whether to expect a view being altered */ diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 6cd84bb8ab..07feae32e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -35,7 +35,6 @@ // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates. private InsertCommitHookDesc insertCommitHookDesc; private AlterTableDesc alterTblDesc; - private ShowColumnsDesc showColumnsDesc; private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; @@ -80,16 +79,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.alterTblDesc = alterTblDesc; } - /** - * @param showColumnsDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowColumnsDesc showColumnsDesc) { - this(inputs, outputs); - - this.showColumnsDesc = showColumnsDesc; - } - /** * @param inputs * @param outputs @@ -142,14 +131,6 @@ public AlterTableDesc getAlterTblDesc() { return alterTblDesc; } - /** - * @return the showColumnsDesc - */ - @Explain(displayName = "Show Columns Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowColumnsDesc getShowColumnsDesc() { - return showColumnsDesc; - } - /** * @return information about the table/partitions we want to alter. */
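To make the new constraint path concrete, the following is a minimal usage sketch, not part of the patch: it mirrors how the repl message handlers and DDLSemanticAnalyzer above build an ADD CONSTRAINT task. The generic type parameters are assumed (the patch text renders raw types), the HashSet-typed input/output sets follow the existing DDLWork convention, and the class and variable names here are illustrative only.

import java.io.Serializable;
import java.util.HashSet;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableAddConstraintDesc;
import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.parse.SemanticException;

/** Illustrative sketch of building an ADD CONSTRAINT task with the classes introduced in this patch. */
class AddConstraintTaskSketch {
  Task<? extends Serializable> primaryKeyTask(String qualifiedTableName, List<SQLPrimaryKey> pks, HiveConf conf)
      throws SemanticException {
    // All six constraint kinds travel in a single Constraints value object; unused kinds stay null.
    // Constructor order: primary keys, foreign keys, not null, unique, default, check.
    Constraints constraints = new Constraints(pks, null, null, null, null, null);

    // The desc only carries data. Its static initializer registers AlterTableAddConstraintOperation
    // with DDLTask2, so executing the task routes to that operation, which calls Hive.addPrimaryKey().
    AlterTableAddConstraintDesc desc =
        new AlterTableAddConstraintDesc(qualifiedTableName, null, constraints);

    // Wrap the desc in DDLWork2 and hand it to TaskFactory, as the repl handlers do.
    return TaskFactory.get(new DDLWork2(new HashSet<ReadEntity>(), new HashSet<WriteEntity>(), desc), conf);
  }
}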