diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java index 254d0a39a6..3e36ae497c 100644 --- druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java +++ druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java @@ -137,7 +137,7 @@ private static final HttpClient HTTP_CLIENT; - private static final List<String> ALLOWED_ALTER_TYPES = ImmutableList.of("ADDPROPS", "DROPPROPS", "ADD_COLUMNS"); + private static final List<String> ALLOWED_ALTER_TYPES = ImmutableList.of("SET_PROPERTIES", "UNSET_PROPERTIES", "ADD_COLUMNS"); static { final Lifecycle lifecycle = new Lifecycle(); diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 554df3c6bf..eb36cfcd2a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -23,9 +23,9 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.security.AccessControlException; import java.io.FileNotFoundException; @@ -214,7 +214,7 @@ ALTER_COMMAND_FOR_VIEWS(10131, "To alter a view you need to use the ALTER VIEW command."), ALTER_COMMAND_FOR_TABLES(10132, "To alter a base table you need to use the ALTER TABLE command."), ALTER_VIEW_DISALLOWED_OP(10133, "Cannot use this form of ALTER on a view"), - ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for " + AlterTableTypes.nonNativeTableAllowedTypes + " to a non-native table "), + ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for " + AlterTableTypes.NON_NATIVE_TABLE_ALLOWED + " to a non-native table "), SORTMERGE_MAPJOIN_FAILED(10135, "Sort merge bucketed join could not be performed. " + "If you really want to perform the operation, either set " +
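Reviewer note: with ADDPROPS/DROPPROPS renamed to SET_PROPERTIES/UNSET_PROPERTIES, the Druid handler's string whitelist and the 10134 message now track the new enum constant names. A minimal sketch of the non-native-table check these two hunks serve (the `checkAlterAllowed` helper is hypothetical; only `AlterTableTypes.NON_NATIVE_TABLE_ALLOWED` and the 10134 message text come from this patch):

```java
import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes;
import org.apache.hadoop.hive.ql.parse.SemanticException;

final class NonNativeAlterCheck {
  // Hypothetical helper: non-native (storage-handler backed) tables only
  // support the whitelisted subset of ALTER TABLE types.
  static void checkAlterAllowed(AlterTableTypes type, boolean isNonNativeTable) throws SemanticException {
    if (isNonNativeTable && !AlterTableTypes.NON_NATIVE_TABLE_ALLOWED.contains(type)) {
      // Mirrors ErrorMsg.ALTER_TABLE_NON_NATIVE (10134) above.
      throw new SemanticException("ALTER TABLE can only be used for "
          + AlterTableTypes.NON_NATIVE_TABLE_ALLOWED + " to a non-native table");
    }
  }
}
```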
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java index 432779b3f4..dcf8c007fd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java @@ -21,12 +21,12 @@ import java.io.Serializable; import java.util.Map; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.Explain; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -50,7 +50,7 @@ public AbstractAlterTableDesc(AlterTableTypes type, String tableName, Map<String, String> partitionSpec, ReplicationSpec replicationSpec, boolean isCascade, boolean expectView, Map<String, String> props) throws SemanticException { this.type = type; - this.tableName = String.join(".", Utilities.getDbTableName(tableName)); + this.tableName = tableName.contains(".") ? tableName : String.join(".", Utilities.getDbTableName(tableName)); this.partitionSpec = partitionSpec; this.replicationSpec = replicationSpec; this.isCascade = isCascade; @@ -91,6 +91,10 @@ public boolean expectView() { return props; } + public EnvironmentContext getEnvironmentContext() { + return null; + } + @Override public String getFullTableName() { return tableName; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java index baf98da37a..221510d603 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.session.SessionState; /** @@ -74,7 +73,7 @@ public int execute() throws HiveException { // Don't change the table object returned by the metastore, as we'll mess with its caches. Table table = oldTable.copy(); - environmentContext = initializeEnvironmentContext(null); + environmentContext = initializeEnvironmentContext(desc.getEnvironmentContext()); if (partitions == null) { doAlteration(table, null); @@ -154,11 +153,11 @@ public void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List props = alterTable.getProps();
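Reviewer note: the base desc now exposes getEnvironmentContext() (null by default), and AbstractAlterTableOperation.execute() hands it to initializeEnvironmentContext(...) instead of a hard-coded null. Based on the DDLTask logic this patch deletes further down, initializeEnvironmentContext presumably behaves roughly like the following sketch (the method body is an assumption reconstructed from the removed code, not part of the patch):

```java
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

final class EnvironmentContextSketch {
  // Use the desc-provided context if there is one, otherwise start fresh,
  // and default to not touching stats during alter operations.
  static EnvironmentContext initialize(EnvironmentContext suggested) {
    EnvironmentContext ctx = (suggested != null) ? suggested : new EnvironmentContext();
    if (ctx.getProperties() == null
        || ctx.getProperties().get(StatsSetupConst.DO_NOT_UPDATE_STATS) == null) {
      ctx.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
    }
    return ctx;
  }
}
```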
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java index 9babf2a1a9..aa953a9430 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; /** * Abstract ancestor of all ALTER TABLE descriptors that are handled by the AlterTableWithWriteIdOperations framework diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableTypes.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableTypes.java new file mode 100644 index 0000000000..6c7f2a7791 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableTypes.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.util.List; +import java.util.Set; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; + +/** + * Enumeration of alter table command types. + */ +public enum AlterTableTypes { + // column + ADD_COLUMNS("add columns"), + REPLACE_COLUMNS("replace columns"), + RENAME_COLUMN("rename column"), + UPDATE_COLUMNS("update columns"), + // partition + ADDPARTITION("add partition"), + DROPPARTITION("drop partition"), + RENAMEPARTITION("rename partition"), // Note: used in RenamePartitionDesc, not here. + ALTERPARTITION("alter partition"), // Note: this is never used in AlterTableDesc. + // constraint + ADD_CONSTRAINT("add constraint"), + DROP_CONSTRAINT("drop constraint"), + // storage + SET_SERDE("set serde"), + SET_SERDE_PROPS("set serde props"), + SET_FILE_FORMAT("add fileformat"), + CLUSTERED_BY("clustered by"), + NOT_SORTED("not sorted"), + NOT_CLUSTERED("not clustered"), + SET_LOCATION("set location"), + SKEWED_BY("skewed by"), + NOT_SKEWED("not skewed"), + SET_SKEWED_LOCATION("alter skew location"), + INTO_BUCKETS("alter bucket number"), + // misc + SET_PROPERTIES("set properties"), + UNSET_PROPERTIES("unset properties"), + TOUCH("touch"), + RENAME("rename"), + OWNER("set owner"), + ARCHIVE("archive"), + UNARCHIVE("unarchive"), + COMPACT("compact"), + TRUNCATE("truncate"), + MERGEFILES("merge files"), + UPDATESTATS("update stats"); // Note: used in ColumnStatsUpdateWork, not here. + + private final String name; + + AlterTableTypes(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public static final List<AlterTableTypes> NON_NATIVE_TABLE_ALLOWED = + ImmutableList.of(SET_PROPERTIES, UNSET_PROPERTIES, ADD_COLUMNS); + + public static final Set<AlterTableTypes> SUPPORT_PARTIAL_PARTITION_SPEC = + ImmutableSet.of(ADD_COLUMNS, REPLACE_COLUMNS, RENAME_COLUMN, SET_PROPERTIES, UNSET_PROPERTIES, SET_SERDE, + SET_SERDE_PROPS, SET_FILE_FORMAT); +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java index e40ba1819d..57d01c4ac6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java @@ -23,9 +23,9 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java index ce3b97eb68..a9bca6005e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java @@ -21,9 +21,9 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableWithConstraintsDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import
org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java index 3975f6682a..32c21150bc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java @@ -23,9 +23,9 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java index 18485c9a81..50028f5d8a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java index 2077c7d7e6..825768a184 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java @@ -19,9 +19,9 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableWithConstraintsDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java new file mode 100644 index 0000000000..de42c49ec5 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation 
(ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... RENAME TO ... commands. + */ +@Explain(displayName = "Rename Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableRenameDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableRenameDesc.class, AlterTableRenameOperation.class); + } + + private final String newName; + + public AlterTableRenameDesc(String tableName, ReplicationSpec replicationSpec, boolean expectView, String newName) + throws SemanticException { + super(AlterTableTypes.RENAME, tableName, null, replicationSpec, false, expectView, null); + this.newName = newName; + } + + @Explain(displayName = "New TableName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getNewName() { + return newName; + } + + @Override + public boolean mayNeedWriteId() { + return true; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java new file mode 100644 index 0000000000..4345c9e6f5 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; + +/** + * Operation process of renaming a table. + */ +public class AlterTableRenameOperation extends AbstractAlterTableOperation { + private final AlterTableRenameDesc desc; + + public AlterTableRenameOperation(DDLOperationContext context, AlterTableRenameDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + String[] names = Utilities.getDbTableName(desc.getTableName()); + if (Utils.isBootstrapDumpInProgress(context.getDb(), names[0])) { + LOG.error("DDLTask: Rename Table not allowed as bootstrap dump in progress"); + throw new HiveException("Rename Table: Not allowed as bootstrap dump in progress"); + } + + return super.execute(); + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + table.setDbName(Utilities.getDatabaseName(desc.getNewName())); + table.setTableName(Utilities.getTableName(desc.getNewName())); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java new file mode 100644 index 0000000000..47c8722dd2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... SET OWNER ... commands. 
+ */ +@Explain(displayName = "Set Owner", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableSetOwnerDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableSetOwnerDesc.class, AlterTableSetOwnerOperation.class); + } + + private final PrincipalDesc ownerPrincipal; + + public AlterTableSetOwnerDesc(String tableName, PrincipalDesc ownerPrincipal) throws SemanticException { + super(AlterTableTypes.OWNER, tableName, null, null, false, false, null); + this.ownerPrincipal = ownerPrincipal; + } + + @Explain(displayName = "Owner Principal", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public PrincipalDesc getOwnerPrincipal() { + return ownerPrincipal; + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerOperation.java new file mode 100644 index 0000000000..eb7be24bba --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerOperation.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of setting the owner of a table. + */ +public class AlterTableSetOwnerOperation extends AbstractAlterTableOperation { + private final AlterTableSetOwnerDesc desc; + + public AlterTableSetOwnerOperation(DDLOperationContext context, AlterTableSetOwnerDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + if (desc.getOwnerPrincipal() != null) { + table.setOwner(desc.getOwnerPrincipal().getName()); + table.setOwnerType(desc.getOwnerPrincipal().getType()); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java new file mode 100644 index 0000000000..cfdffb73de --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... SET TBLPROPERTIES ... commands. + */ +@Explain(displayName = "Set Properties", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableSetPropertiesDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableSetPropertiesDesc.class, AlterTableSetPropertiesOperation.class); + } + + private final boolean isExplicitStatsUpdate; + private final boolean isFullAcidConversion; + private final EnvironmentContext environmentContext; + + public AlterTableSetPropertiesDesc(String tableName, Map<String, String> partitionSpec, + ReplicationSpec replicationSpec, boolean expectView, Map<String, String> props, boolean isExplicitStatsUpdate, + boolean isFullAcidConversion, EnvironmentContext environmentContext) throws SemanticException { + super(AlterTableTypes.SET_PROPERTIES, tableName, partitionSpec, replicationSpec, false, expectView, props); + this.isExplicitStatsUpdate = isExplicitStatsUpdate; + this.isFullAcidConversion = isFullAcidConversion; + this.environmentContext = environmentContext; + } + + @Override + public EnvironmentContext getEnvironmentContext() { + return environmentContext; + } + + @Override + public boolean mayNeedWriteId() { + return isExplicitStatsUpdate || AcidUtils.isToInsertOnlyTable(null, getProps()) || + (AcidUtils.isTransactionalTable(getProps()) && !isFullAcidConversion); + } +}
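Reviewer note: each new desc wires itself to its operation in a static initializer via DDLTask2.registerOperation(descClass, operationClass), so execution can dispatch on the desc type alone instead of on AlterTableTypes. A minimal sketch of the registry this implies (only registerOperation itself appears in this patch; the map and the lookup method are assumptions):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class OperationRegistrySketch {
  // Desc class -> operation class, populated by the descs' static initializers.
  private static final Map<Class<?>, Class<?>> OPERATIONS = new ConcurrentHashMap<>();

  static void registerOperation(Class<?> descClass, Class<?> operationClass) {
    OPERATIONS.put(descClass, operationClass);
  }

  // Hypothetical lookup used at execution time to instantiate the operation
  // matching the desc that the semantic analyzer produced.
  static Class<?> operationFor(Class<?> descClass) {
    return OPERATIONS.get(descClass);
  }
}
```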
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java new file mode 100644 index 0000000000..06844b4b7b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang.BooleanUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.PartitionIterable; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; +import org.apache.hadoop.hive.ql.plan.MoveWork; + +import com.google.common.collect.Lists; + +/** + * Operation process of setting properties of a table. + */ +public class AlterTableSetPropertiesOperation extends AbstractAlterTableOperation { + private final AlterTableSetPropertiesDesc desc; + + public AlterTableSetPropertiesOperation(DDLOperationContext context, AlterTableSetPropertiesDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + if (StatsSetupConst.USER.equals(environmentContext.getProperties().get(StatsSetupConst.STATS_GENERATED))) { + environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS); + } + + if (partition != null) { + partition.getTPartition().getParameters().putAll(desc.getProps()); + } else { + boolean isFromMmTable = AcidUtils.isInsertOnlyTable(table.getParameters()); + Boolean isToMmTable = AcidUtils.isToInsertOnlyTable(table, desc.getProps()); + if (!isFromMmTable && BooleanUtils.isTrue(isToMmTable)) { + if (!HiveConf.getBoolVar(context.getConf(), ConfVars.HIVE_MM_ALLOW_ORIGINALS)) { + List<Task<?>> mmTasks = generateAddMmTasks(table, desc.getWriteId()); + for (Task<?> mmTask : mmTasks) { + context.getTask().addDependentTask(mmTask); + } + } else { + if (!table.getPartitionKeys().isEmpty()) { + PartitionIterable parts = new PartitionIterable(context.getDb(), table, null, + HiveConf.getIntVar(context.getConf(), ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + for (Partition part : parts) { + checkMmLb(part); + } + } else { + checkMmLb(table); + } + } + } else if (isFromMmTable && BooleanUtils.isFalse(isToMmTable)) { + throw new HiveException("Cannot convert an ACID table to non-ACID"); + } + + // Converting to/from external table + String externalProp = desc.getProps().get("EXTERNAL"); + if (externalProp != null) { + if (Boolean.parseBoolean(externalProp) && table.getTableType() == TableType.MANAGED_TABLE) { + table.setTableType(TableType.EXTERNAL_TABLE); + } else if (!Boolean.parseBoolean(externalProp) && table.getTableType() == TableType.EXTERNAL_TABLE) { + table.setTableType(TableType.MANAGED_TABLE); + } + } + + table.getTTable().getParameters().putAll(desc.getProps()); + } + }
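Reviewer note: the generateAddMmTasks helper below moves the pre-existing files of each table/partition into the first delta directory of the newly insert-only (MM) table, named by AcidUtils.deltaSubdir(writeId, writeId, stmtId). For orientation, a hedged sketch of the resulting layout (the zero-padded delta naming is the usual ACID convention and is assumed here, as are the example paths):

```java
import org.apache.hadoop.fs.Path;

final class MmDirSketch {
  public static void main(String[] args) {
    long writeId = 1L;
    int stmtId = 0;
    // Assumed shape of AcidUtils.deltaSubdir(writeId, writeId, stmtId):
    String mmDir = String.format("delta_%07d_%07d_%04d", writeId, writeId, stmtId);
    Path source = new Path("/warehouse/t/p=1"); // illustrative partition location
    Path target = new Path(source, mmDir);
    // Existing files under /warehouse/t/p=1 end up under
    // /warehouse/t/p=1/delta_0000001_0000001_0000 via the generated MoveWork.
    System.out.println(target);
  }
}
```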
+ + + private List<Task<?>> generateAddMmTasks(Table table, Long writeId) throws HiveException { + // We will move all the files in the table/partition directories into the first MM + // directory, then commit the first write ID. + if (writeId == null) { + throw new HiveException("Internal error - write ID not set for MM conversion"); + } + + List<Path> sources = new ArrayList<>(); + List<Path> targets = new ArrayList<>(); + + int stmtId = 0; + String mmDir = AcidUtils.deltaSubdir(writeId, writeId, stmtId); + + if (!table.getPartitionKeys().isEmpty()) { + PartitionIterable parts = new PartitionIterable(context.getDb(), table, null, + HiveConf.getIntVar(context.getConf(), ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + for (Partition part : parts) { + checkMmLb(part); + Path source = part.getDataLocation(); + Path target = new Path(source, mmDir); + sources.add(source); + targets.add(target); + Utilities.FILE_OP_LOGGER.trace("Will move " + source + " to " + target); + } + } else { + checkMmLb(table); + Path source = table.getDataLocation(); + Path target = new Path(source, mmDir); + sources.add(source); + targets.add(target); + Utilities.FILE_OP_LOGGER.trace("Will move " + source + " to " + target); + } + + // Don't set inputs and outputs - the locks have already been taken so it's pointless. + MoveWork mw = new MoveWork(null, null, null, null, false); + mw.setMultiFilesDesc(new LoadMultiFilesDesc(sources, targets, true, null, null)); + return Lists.<Task<?>>newArrayList(TaskFactory.get(mw)); + } + + private void checkMmLb(Table table) throws HiveException { + if (!table.isStoredAsSubDirectories()) { + return; + } + // TODO [MM gap?]: by design; no-one seems to use LB tables. They will work, but not convert. + // It's possible to work around this by re-creating and re-inserting the table. + throw new HiveException("Converting list bucketed tables stored as subdirectories " + + " to MM is not supported. Please re-create a table in the desired format."); + } + + private void checkMmLb(Partition partition) throws HiveException { + if (!partition.isStoredAsSubDirectories()) { + return; + } + throw new HiveException("Converting list bucketed tables stored as subdirectories " + + " to MM is not supported. Please re-create a table in the desired format."); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java new file mode 100644 index 0000000000..ed19f398d7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... UNSET TBLPROPERTIES [IF EXISTS] ... commands. + */ +@Explain(displayName = "Unset Properties", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableUnsetPropertiesDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableUnsetPropertiesDesc.class, AlterTableUnsetPropertiesOperation.class); + } + + private final boolean isExplicitStatsUpdate; + private final EnvironmentContext environmentContext; + + public AlterTableUnsetPropertiesDesc(String tableName, Map<String, String> partitionSpec, + ReplicationSpec replicationSpec, boolean expectView, Map<String, String> props, boolean isExplicitStatsUpdate, + EnvironmentContext environmentContext) throws SemanticException { + super(AlterTableTypes.UNSET_PROPERTIES, tableName, partitionSpec, replicationSpec, false, expectView, props); + this.isExplicitStatsUpdate = isExplicitStatsUpdate; + this.environmentContext = environmentContext; + } + + @Override + public EnvironmentContext getEnvironmentContext() { + return environmentContext; + } + + @Override + public boolean mayNeedWriteId() { + return isExplicitStatsUpdate; + } +}
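Reviewer note: both the set and unset operations special-case user-provided statistics: when the desc's EnvironmentContext carries STATS_GENERATED=USER, the DO_NOT_UPDATE_STATS marker is removed so the alter is allowed to touch stats. A hedged sketch of building such a context on the caller side (the helper and its name are illustrative; the constants are from StatsSetupConst):

```java
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

final class StatsContextSketch {
  // Marks an alter as an explicit, user-driven stats change, which the
  // operations above translate into "stats updates are permitted".
  static EnvironmentContext userStatsContext() {
    EnvironmentContext ctx = new EnvironmentContext();
    ctx.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER);
    return ctx;
  }
}
```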
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesOperation.java new file mode 100644 index 0000000000..2bdabc4184 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesOperation.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.misc; + +import java.util.Set; + +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of unsetting properties of a table. + */ +public class AlterTableUnsetPropertiesOperation extends AbstractAlterTableOperation { + private final AlterTableUnsetPropertiesDesc desc; + + public AlterTableUnsetPropertiesOperation(DDLOperationContext context, AlterTableUnsetPropertiesDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + if (StatsSetupConst.USER.equals(environmentContext.getProperties().get(StatsSetupConst.STATS_GENERATED))) { + // drop a stats parameter, which triggers recompute stats update automatically + environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS); + } + + if (partition == null) { + Set<String> removedSet = desc.getProps().keySet(); + boolean isFromMmTable = AcidUtils.isInsertOnlyTable(table.getParameters()); + boolean isRemoved = AcidUtils.isRemovedInsertOnlyTable(removedSet); + if (isFromMmTable && isRemoved) { + throw new HiveException("Cannot convert an ACID table to non-ACID"); + } + + // Check if external table property being removed + if (removedSet.contains("EXTERNAL") && table.getTableType() == TableType.EXTERNAL_TABLE) { + table.setTableType(TableType.MANAGED_TABLE); + } + } + for (String key : desc.getProps().keySet()) { + if (partition != null) { + partition.getTPartition().getParameters().remove(key); + } else { + table.getTTable().getParameters().remove(key); + } + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java index 8aab47b840..ade2d26927 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java @@ -25,8 +25,8 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.hive.ql.util.DirectionUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java index 680f31096e..daa33cb237 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import
org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java index a335d0dbdc..48e573ff88 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java index af67964c73..492e447014 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java @@ -20,8 +20,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java index 11e8bf37eb..2226495be5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java index 89bbb17aec..ebf3e2ae77 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git 
ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java index c918bb9870..f4cb818021 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java index 861139d41b..481bd87253 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java index 381b94f38a..70164a8edf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java index afe2b0817b..5772b300de 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java index 6a6f397ef7..4ec0ed43fd 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 88ea73f8d5..e9036b97bd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -24,14 +24,11 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; -import com.google.common.collect.Lists; -import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; @@ -51,13 +48,13 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes; import org.apache.hadoop.hive.ql.exec.ArchiveUtils.PartSpecInfo; import org.apache.hadoop.hive.ql.exec.tez.TezTask; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -69,24 +66,17 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.PartitionIterable; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; -import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; -import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; -import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; -import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc; @@ -158,17 +148,6 @@ public int execute(DriverContext driverContext) 
{ try { db = Hive.get(conf); - AlterTableDesc alterTbl = work.getAlterTblDesc(); - if (alterTbl != null) { - if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) { - // no alter, the table is missing either due to drop/rename which follows the alter. - // or the existing table is newer than our update. - LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName()); - return 0; - } - return alterTable(db, alterTbl); - } - AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc(); if (simpleDesc != null) { if (simpleDesc.getType() == AlterTableTypes.TOUCH) { @@ -1082,130 +1061,6 @@ private int msck(Hive db, MsckDesc msckDesc) { } } - /** - * Alter a given table. - * - * @param db - * The database in question. - * @param alterTbl - * This is the table we're altering. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { - if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { - String names[] = Utilities.getDbTableName(alterTbl.getOldName()); - if (Utils.isBootstrapDumpInProgress(db, names[0])) { - LOG.error("DDLTask: Rename Table not allowed as bootstrap dump in progress"); - throw new HiveException("Rename Table: Not allowed as bootstrap dump in progress"); - } - } - - // alter the table - Table tbl = db.getTable(alterTbl.getOldName()); - - List<Partition> allPartitions = null; - if (alterTbl.getPartSpec() != null) { - Map<String, String> partSpec = alterTbl.getPartSpec(); - if (DDLSemanticAnalyzer.isFullSpec(tbl, partSpec)) { - allPartitions = new ArrayList<Partition>(); - Partition part = db.getPartition(tbl, partSpec, false); - if (part == null) { - // User provided a fully specified partition spec but it doesn't exist, fail. - throw new HiveException(ErrorMsg.INVALID_PARTITION, - StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName()); - - } - allPartitions.add(part); - } else { - // DDLSemanticAnalyzer has already checked if partial partition specs are allowed, - // thus we should not need to check it here. - allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec()); - } - } - - // Don't change the table object returned by the metastore, as we'll mess with its caches. - Table oldTbl = tbl; - tbl = oldTbl.copy(); - // Handle child tasks here. We could add them directly wherever we need, - // but let's make it a little bit more explicit. - if (allPartitions != null) { - // Alter all partitions - for (Partition part : allPartitions) { - addChildTasks(alterTableOrSinglePartition(alterTbl, tbl, part)); - } - } else { - // Just alter the table - addChildTasks(alterTableOrSinglePartition(alterTbl, tbl, null)); - } - - if (allPartitions == null) { - updateModifiedParameters(tbl.getTTable().getParameters(), conf); - tbl.checkValidity(conf); - } else { - for (Partition tmpPart: allPartitions) { - updateModifiedParameters(tmpPart.getParameters(), conf); - } - } - - try { - EnvironmentContext environmentContext = alterTbl.getEnvironmentContext(); - if (environmentContext == null) { - environmentContext = new EnvironmentContext(); - } - environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, alterTbl.getOp().name()); - if (allPartitions == null) { - long writeId = alterTbl.getWriteId() != null ?
alterTbl.getWriteId() : 0; - if (alterTbl.getReplicationSpec() != null && - alterTbl.getReplicationSpec().isMigratingToTxnTable()) { - Long tmpWriteId = ReplUtils.getMigrationCurrentTblWriteId(conf); - if (tmpWriteId == null) { - throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); - } - writeId = tmpWriteId; - } - db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), environmentContext, - true, writeId); - } else { - // Note: this is necessary for UPDATE_STATISTICS command, that operates via ADDPROPS (why?). - // For any other updates, we don't want to do txn check on partitions when altering table. - boolean isTxn = false; - if (alterTbl.getPartSpec() != null && alterTbl.getOp() == AlterTableTypes.ADDPROPS) { - // ADDPROPS is used to add replication properties like repl.last.id, which isn't - // transactional change. In case of replication check for transactional properties - // explicitly. - Map<String, String> props = alterTbl.getProps(); - if (alterTbl.getReplicationSpec() != null && alterTbl.getReplicationSpec().isInReplicationScope()) { - isTxn = (props.get(StatsSetupConst.COLUMN_STATS_ACCURATE) != null); - } else { - isTxn = true; - } - } - db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext, isTxn); - } - } catch (InvalidOperationException e) { - LOG.error("alter table: ", e); - throw new HiveException(e, ErrorMsg.GENERIC_ERROR); - } - - // This is kind of hacky - the read entity contains the old table, whereas - // the write entity - // contains the new table. This is needed for rename - both the old and the - // new table names are - // passed - // Don't acquire locks for any of these, we have already asked for them in DDLSemanticAnalyzer. - if (allPartitions != null ) { - for (Partition tmpPart: allPartitions) { - work.getInputs().add(new ReadEntity(tmpPart)); - addIfAbsentByName(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL_NO_LOCK)); - } - } else { - work.getInputs().add(new ReadEntity(oldTbl)); - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - } - return 0; - } /** * There are many places where "duplicate" Read/WriteEntity objects are added. The way this was * initially implemented, the duplicate just replaced the previous object. @@ -1239,218 +1094,6 @@ private boolean addIfAbsentByName(WriteEntity newWriteEntity) { return addIfAbsentByName(newWriteEntity, work.getOutputs()); } - private void addChildTasks(List<Task<?>> extraTasks) { - if (extraTasks == null) { - return; - } - for (Task<?> newTask : extraTasks) { - addDependentTask(newTask); - } - } - - private boolean isSchemaEvolutionEnabled(Table tbl) { - boolean isAcid = AcidUtils.isTablePropertyTransactional(tbl.getMetadata()); - if (isAcid || HiveConf.getBoolVar(conf, ConfVars.HIVE_SCHEMA_EVOLUTION)) { - return true; - } - return false; - } - - - private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition part) { - return (part == null ?
-  }
-
-  private List<Task<?>> alterTableOrSinglePartition(AlterTableDesc alterTbl, Table tbl,
-      Partition part) throws HiveException {
-    EnvironmentContext environmentContext = alterTbl.getEnvironmentContext();
-    if (environmentContext == null) {
-      environmentContext = new EnvironmentContext();
-      alterTbl.setEnvironmentContext(environmentContext);
-    }
-    // do not need update stats in alter table/partition operations
-    if (environmentContext.getProperties() == null ||
-        environmentContext.getProperties().get(StatsSetupConst.DO_NOT_UPDATE_STATS) == null) {
-      environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
-    }
-
-    if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
-      tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName()));
-      tbl.setTableName(Utilities.getTableName(alterTbl.getNewName()));
-    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
-      return alterTableAddProps(alterTbl, tbl, part, environmentContext);
-    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) {
-      return alterTableDropProps(alterTbl, tbl, part, environmentContext);
-    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.OWNER) {
-      if (alterTbl.getOwnerPrincipal() != null) {
-        tbl.setOwner(alterTbl.getOwnerPrincipal().getName());
-        tbl.setOwnerType(alterTbl.getOwnerPrincipal().getType());
-      }
-    } else {
-      throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString());
-    }
-
-    return null;
-  }
-
-  private List<Task<?>> alterTableDropProps(AlterTableDesc alterTbl, Table tbl,
-      Partition part, EnvironmentContext environmentContext) throws HiveException {
-    if (StatsSetupConst.USER.equals(environmentContext.getProperties()
-        .get(StatsSetupConst.STATS_GENERATED))) {
-      // drop a stats parameter, which triggers recompute stats update automatically
-      environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
-    }
-
-    List<Task<?>> result = null;
-    if (part == null) {
-      Set<String> removedSet = alterTbl.getProps().keySet();
-      boolean isFromMmTable = AcidUtils.isInsertOnlyTable(tbl.getParameters()),
-          isRemoved = AcidUtils.isRemovedInsertOnlyTable(removedSet);
-      if (isFromMmTable && isRemoved) {
-        throw new HiveException("Cannot convert an ACID table to non-ACID");
-      }
-
-      // Check if external table property being removed
-      if (removedSet.contains("EXTERNAL") && tbl.getTableType() == TableType.EXTERNAL_TABLE) {
-        tbl.setTableType(TableType.MANAGED_TABLE);
-      }
-    }
-    Iterator<String> keyItr = alterTbl.getProps().keySet().iterator();
-    while (keyItr.hasNext()) {
-      if (part != null) {
-        part.getTPartition().getParameters().remove(keyItr.next());
-      } else {
-        tbl.getTTable().getParameters().remove(keyItr.next());
-      }
-    }
-    return result;
-  }
-
-  private void checkMmLb(Table tbl) throws HiveException {
-    if (!tbl.isStoredAsSubDirectories()) {
-      return;
-    }
-    // TODO [MM gap?]: by design; no-one seems to use LB tables. They will work, but not convert.
-    // It's possible to work around this by re-creating and re-inserting the table.
-    throw new HiveException("Converting list bucketed tables stored as subdirectories "
-        + " to MM is not supported. Please re-create a table in the desired format.");
-  }
-
-  private void checkMmLb(Partition part) throws HiveException {
-    if (!part.isStoredAsSubDirectories()) {
-      return;
-    }
-    throw new HiveException("Converting list bucketed tables stored as subdirectories "
-        + " to MM is not supported. Please re-create a table in the desired format.");
-  }
-
-  private List<Task<?>> generateAddMmTasks(Table tbl, Long writeId) throws HiveException {
-    // We will move all the files in the table/partition directories into the first MM
-    // directory, then commit the first write ID.
-    List<Path> srcs = new ArrayList<>(), tgts = new ArrayList<>();
-    if (writeId == null) {
-      throw new HiveException("Internal error - write ID not set for MM conversion");
-    }
-
-    int stmtId = 0;
-    String mmDir = AcidUtils.deltaSubdir(writeId, writeId, stmtId);
-
-    Hive db = getHive();
-    if (tbl.getPartitionKeys().size() > 0) {
-      PartitionIterable parts = new PartitionIterable(db, tbl, null,
-          HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
-      Iterator<Partition> partIter = parts.iterator();
-      while (partIter.hasNext()) {
-        Partition part = partIter.next();
-        checkMmLb(part);
-        Path src = part.getDataLocation(), tgt = new Path(src, mmDir);
-        srcs.add(src);
-        tgts.add(tgt);
-        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
-          Utilities.FILE_OP_LOGGER.trace("Will move " + src + " to " + tgt);
-        }
-      }
-    } else {
-      checkMmLb(tbl);
-      Path src = tbl.getDataLocation(), tgt = new Path(src, mmDir);
-      srcs.add(src);
-      tgts.add(tgt);
-      if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
-        Utilities.FILE_OP_LOGGER.trace("Will move " + src + " to " + tgt);
-      }
-    }
-    // Don't set inputs and outputs - the locks have already been taken so it's pointless.
-    MoveWork mw = new MoveWork(null, null, null, null, false);
-    mw.setMultiFilesDesc(new LoadMultiFilesDesc(srcs, tgts, true, null, null));
-    return Lists.<Task<?>>newArrayList(TaskFactory.get(mw));
-  }
-
-  private List<Task<?>> alterTableAddProps(AlterTableDesc alterTbl, Table tbl,
-      Partition part, EnvironmentContext environmentContext) throws HiveException {
-    if (StatsSetupConst.USER.equals(environmentContext.getProperties()
-        .get(StatsSetupConst.STATS_GENERATED))) {
-      environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
-    }
-    List<Task<?>> result = null;
-    if (part != null) {
-      part.getTPartition().getParameters().putAll(alterTbl.getProps());
-    } else {
-      boolean isFromMmTable = AcidUtils.isInsertOnlyTable(tbl.getParameters());
-      Boolean isToMmTable = AcidUtils.isToInsertOnlyTable(tbl, alterTbl.getProps());
-      if (isToMmTable != null) {
-        if (!isFromMmTable && isToMmTable) {
-          if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_MM_ALLOW_ORIGINALS)) {
-            result = generateAddMmTasks(tbl, alterTbl.getWriteId());
-          } else {
-            if (tbl.getPartitionKeys().size() > 0) {
-              Hive db = getHive();
-              PartitionIterable parts = new PartitionIterable(db, tbl, null,
-                  HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
-              Iterator<Partition> partIter = parts.iterator();
-              while (partIter.hasNext()) {
-                Partition part0 = partIter.next();
-                checkMmLb(part0);
-              }
-            } else {
-              checkMmLb(tbl);
-            }
-          }
-        } else if (isFromMmTable && !isToMmTable) {
-          throw new HiveException("Cannot convert an ACID table to non-ACID");
-        }
-      }
-
-      // Converting to/from external table
-      String externalProp = alterTbl.getProps().get("EXTERNAL");
-      if (externalProp != null) {
-        if (Boolean.parseBoolean(externalProp) && tbl.getTableType() == TableType.MANAGED_TABLE) {
-          tbl.setTableType(TableType.EXTERNAL_TABLE);
-        } else if (!Boolean.parseBoolean(externalProp) && tbl.getTableType() == TableType.EXTERNAL_TABLE) {
-          tbl.setTableType(TableType.MANAGED_TABLE);
-        }
-      }
-
-      tbl.getTTable().getParameters().putAll(alterTbl.getProps());
-    }
-    return result;
-  }
-
-  /**
-   * Update last_modified_by and last_modified_time parameters in parameter map.
-   *
-   * @param params
-   *          Parameters.
-   * @param conf
-   *          HiveConf of session
-   */
-  private boolean updateModifiedParameters(Map<String, String> params, HiveConf conf) throws HiveException {
-    String user = null;
-    user = SessionState.getUserFromAuthenticator();
-    params.put("last_modified_by", user);
-    params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000));
-    return true;
-  }
-
   /**
    * Check if the given serde is valid.
    */
@@ -1477,45 +1120,6 @@ public String getName() {
     return "DDL";
   }
 
-  /**
-   * Validate if the given table/partition is eligible for update
-   *
-   * @param db Database.
-   * @param tableName Table name of format db.table
-   * @param partSpec Partition spec for the partition
-   * @param replicationSpec Replications specification
-   *
-   * @return boolean true if allow the operation
-   * @throws HiveException
-   */
-  private boolean allowOperationInReplicationScope(Hive db, String tableName,
-      Map<String, String> partSpec, ReplicationSpec replicationSpec) throws HiveException {
-    if ((null == replicationSpec) || (!replicationSpec.isInReplicationScope())) {
-      // Always allow the operation if it is not in replication scope.
-      return true;
-    }
-    // If the table/partition exist and is older than the event, then just apply
-    // the event else noop.
-    Table existingTable = db.getTable(tableName, false);
-    if ((existingTable != null)
-        && replicationSpec.allowEventReplacementInto(existingTable.getParameters())) {
-      // Table exists and is older than the update. Now, need to ensure if update allowed on the
-      // partition.
-      if (partSpec != null) {
-        Partition existingPtn = db.getPartition(existingTable, partSpec, false);
-        return ((existingPtn != null)
-            && replicationSpec.allowEventReplacementInto(existingPtn.getParameters()));
-      }
-
-      // Replacement is allowed as the existing table is older than event
-      return true;
-    }
-
-    // The table is missing either due to drop/rename which follows the operation.
-    // Or the existing table is newer than our update. So, don't allow the update.
-    return false;
-  }
-
   private int remFirstIncPendFlag(Hive hive, ReplRemoveFirstIncLoadPendFlagDesc desc) throws HiveException, TException {
     String dbNameOrPattern = desc.getDatabaseName();
     String tableNameOrPattern = desc.getTableName();
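A note for readers following the migration: the per-operation descriptors that replace the removed alterTable() dispatch are all built the same way, then wrapped in DDLWork2. A minimal sketch, assuming the eight-argument constructor order used consistently by the call sites in this patch (table name, partition spec, replication spec, expectView, properties, isExplicitStatsUpdate, isAcidConversion, environment context); the table name and property here are illustrative, and conf is the session HiveConf:

    // Sketch only: builds an ALTER TABLE ... SET TBLPROPERTIES task the way the
    // migrated call sites below do. "default.sample_table" and the property are
    // made up; inputs/outputs are left empty as in the ReplUtils call site.
    Map<String, String> props = new HashMap<>();
    props.put("comment", "set via the new DDL framework");
    AlterTableSetPropertiesDesc desc = new AlterTableSetPropertiesDesc(
        "default.sample_table", null, null, false, props, false, false, null);
    Task<? extends Serializable> task =
        TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), desc), conf);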
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
index 13de791fb3..441d1eae74 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.ddl.DDLWork2;
 import org.apache.hadoop.hive.ql.ddl.database.AlterDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
@@ -47,7 +48,6 @@
 import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker;
 import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger;
 import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
 import org.apache.hadoop.hive.ql.plan.ReplTxnWork;
@@ -280,13 +280,11 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa
     HashMap<String, String> mapProp = new HashMap<>();
     mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replState);
 
-    AlterTableDesc alterTblDesc = new AlterTableDesc(
-        AlterTableDesc.AlterTableTypes.ADDPROPS, new ReplicationSpec(replState, replState));
-    alterTblDesc.setProps(mapProp);
-    alterTblDesc.setOldName(StatsUtils.getFullyQualifiedTableName(dbName, tableName));
-    alterTblDesc.setPartSpec((HashMap<String, String>) partSpec);
+    String fqTableName = StatsUtils.getFullyQualifiedTableName(dbName, tableName);
+    AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(fqTableName, partSpec,
+        new ReplicationSpec(replState, replState), false, mapProp, false, false, null);
 
-    Task<? extends Serializable> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf);
+    Task<? extends Serializable> updateReplIdTask = TaskFactory.get(new DDLWork2(inputs, outputs, alterTblDesc), conf);
 
     // Link the update repl state task with dependency collection task
     if (preCursor != null) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index f9f13e1a4c..8627ba7d08 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -26,6 +26,8 @@
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
@@ -34,9 +36,7 @@
 import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.ReplTxnWork;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -59,7 +59,6 @@
 import static org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration.TableMigrationOption.MANAGED;
 
-
 public class ReplUtils {
 
   public static final String LAST_REPL_ID_KEY = "hive.repl.last.repl.id";
@@ -144,14 +143,10 @@
     HashMap<String, String> mapProp = new HashMap<>();
     mapProp.put(REPL_CHECKPOINT_KEY, dumpRoot);
 
-    AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableDesc.AlterTableTypes.ADDPROPS);
-    alterTblDesc.setProps(mapProp);
-    alterTblDesc.setOldName(
-        StatsUtils.getFullyQualifiedTableName(tableDesc.getDatabaseName(), tableDesc.getTableName()));
-    if (partSpec != null) {
-      alterTblDesc.setPartSpec(partSpec);
-    }
-    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc), conf);
+    String fqTableName = StatsUtils.getFullyQualifiedTableName(tableDesc.getDatabaseName(), tableDesc.getTableName());
+    AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(fqTableName, partSpec, null, false,
+        mapProp, false, false, null);
+    return TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), alterTblDesc), conf);
   }
 
   public static boolean replCkptStatus(String dbName, Map<String, String> props, String dumpRoot)
diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
index 7f8f9a7631..e412f9482b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
+++ ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
@@ -20,10 +20,10 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 
 import java.io.Serializable;
 
@@ -195,7 +195,7 @@ public boolean isTempURI() {
    * @param op Operation type from the alter table description
    * @return the write type this should use.
    */
-  public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTypes op) {
+  public static WriteType determineAlterTableWriteType(AlterTableTypes op) {
     switch (op) {
     case RENAME_COLUMN:
     case CLUSTERED_BY:
@@ -203,7 +203,7 @@ public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTy
     case NOT_CLUSTERED:
     case SET_FILE_FORMAT:
     case SET_SERDE:
-    case DROPPROPS:
+    case UNSET_PROPERTIES:
     case REPLACE_COLUMNS:
     case ARCHIVE:
     case UNARCHIVE:
@@ -223,7 +223,7 @@ public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTy
     case ADDPARTITION:
     case SET_SERDE_PROPS:
-    case ADDPROPS:
+    case SET_PROPERTIES:
     case UPDATESTATS:
       return WriteType.DDL_SHARED;
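The renamed constants keep their pre-rename lock semantics in determineAlterTableWriteType(); only the names change. A small sketch of what the visible hunks guarantee (the return statement of the first case group is outside the hunk, so only the shared-lock claim is shown here):

    // Sketch: SET_PROPERTIES (formerly ADDPROPS) stays in the shared-lock group,
    // per the second hunk above; UNSET_PROPERTIES (formerly DROPPROPS) stays
    // grouped with the column/serde rewrites, whose write type is not visible
    // in this patch.
    WriteEntity.WriteType forSet =
        WriteEntity.determineAlterTableWriteType(AlterTableTypes.SET_PROPERTIES);
    assert forSet == WriteEntity.WriteType.DDL_SHARED;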
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
index 76415cf7e2..cd7b69dc8a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hive.ql.ddl.DDLWork2;
 import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -49,8 +50,6 @@
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.ExportWork;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -189,12 +188,11 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException {
     // do it after populating temp table so that it's written as non-transactional table but
     // update props before export so that export archive metadata has these props. This way when
     // IMPORT is done for this archive and target table doesn't exist, it will be created as Acid.
-    AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableDesc.AlterTableTypes.ADDPROPS);
     Map<String, String> mapProps = new HashMap<>();
     mapProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString());
-    alterTblDesc.setProps(mapProps);
-    alterTblDesc.setOldName(newTableName);
-    addExportTask(rootTasks, exportTask, TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
+    AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(newTableName, null, null, false,
+        mapProps, false, false, null);
+    addExportTask(rootTasks, exportTask, TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc)));
 
     // Now make a task to drop temp table
     // {@link DDLSemanticAnalyzer#analyzeDropTable(ASTNode ast, TableType expectedType)
@@ -254,7 +252,7 @@ private StringBuilder generateExportQuery(List<FieldSchema> partCols, ASTNode to
    * Makes the exportTask run after all other tasks of the "insert into T ..." are done.
    */
   private void addExportTask(List<Task<? extends Serializable>> rootTasks,
-      Task<ExportWork> exportTask, Task<DDLWork> alterTable) {
+      Task<ExportWork> exportTask, Task<DDLWork2> alterTable) {
     for (Task<? extends Serializable> t : rootTasks) {
       if (t.getNumChild() <= 0) {
         //todo: ConditionalTask#addDependentTask(Task) doesn't do the right thing: HIVE-18978
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index a58ac2ffe9..f8817d4785 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -93,6 +93,7 @@
 import org.apache.hadoop.hive.ql.ddl.process.ShowCompactionsDesc;
 import org.apache.hadoop.hive.ql.ddl.process.ShowTransactionsDesc;
 import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes;
 import org.apache.hadoop.hive.ql.ddl.table.column.AlterTableAddColumnsDesc;
 import org.apache.hadoop.hive.ql.ddl.table.column.AlterTableChangeColumnDesc;
 import org.apache.hadoop.hive.ql.ddl.table.column.AlterTableReplaceColumnsDesc;
@@ -110,6 +111,10 @@
 import org.apache.hadoop.hive.ql.ddl.table.lock.LockTableDesc;
 import org.apache.hadoop.hive.ql.ddl.table.lock.ShowLocksDesc;
 import org.apache.hadoop.hive.ql.ddl.table.lock.UnlockTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableRenameDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetOwnerDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableUnsetPropertiesDesc;
 import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc;
 import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc;
 import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAlterPartitionDesc;
@@ -173,8 +178,6 @@
 import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils;
 import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactory;
 import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
 import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
@@ -683,7 +686,7 @@ private void analyzeAlterTableUpdateStats(ASTNode ast, String tblName, Map
+      Map<String, String> tableParams = tab.getTTable().getParameters();
+      for (String currKey : mapProp.keySet()) {
+        if (!tableParams.containsKey(currKey)) {
+          String errorMsg = "The following property " + currKey + " does not exist in " + tab.getTableName();
+          throw new SemanticException(
+              ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg));
+        }
+      }
       }
+
+      alterTblDesc = new AlterTableUnsetPropertiesDesc(tableName, partSpec, null, expectView, mapProp,
+          isExplicitStatsUpdate, environmentContext);
+      addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, alterTblDesc.getType(), isToTxn);
+      ddlWork = new DDLWork2(getInputs(), getOutputs(), alterTblDesc);
     } else {
       addPropertyReadEntry(mapProp, inputs);
-      alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, partSpec, expectView);
+      boolean isAcidConversion = isToTxn && AcidUtils.isFullAcidTable(mapProp)
+          && !AcidUtils.isFullAcidTable(getTable(qualified, true));
+      alterTblDesc = new AlterTableSetPropertiesDesc(tableName, partSpec, null, expectView, mapProp,
+          isExplicitStatsUpdate, isAcidConversion, environmentContext);
+      addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, alterTblDesc.getType(), isToTxn);
+      ddlWork = new DDLWork2(getInputs(), getOutputs(), alterTblDesc);
     }
-    alterTblDesc.setProps(mapProp);
-    alterTblDesc.setEnvironmentContext(environmentContext);
-    alterTblDesc.setOldName(tableName);
-
-
-
-    boolean isToTxn = AcidUtils.isTablePropertyTransactional(mapProp)
-        || mapProp.containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
-    addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, isToTxn);
-    // This special handling is because we cannot generate write ID for full ACID conversion,
-    // it will break the weird 10000001-write-ID logic that is currently in use. However, we do
-    // want to generate a write ID for prop changes for existing txn tables, or MM conversion.
-    boolean isAcidConversion = isToTxn && AcidUtils.isFullAcidTable(mapProp)
-        && !AcidUtils.isFullAcidTable(getTable(qualified, true));
-
-    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), alterTblDesc);
     if (isToTxn) {
-      alterTblDesc.setIsFullAcidConversion(isAcidConversion);
-      setAcidDdlDesc(alterTblDesc);
       ddlWork.setNeedLock(true); // Hmm... why don't many other operations here need locks?
     }
-    if (changeStatsSucceeded) {
-      Table table = getTable(qualified, true);
-      if (AcidUtils.isTransactionalTable(table)) {
-        alterTblDesc.setIsExplicitStatsUpdate(true);
-        setAcidDdlDesc(alterTblDesc);
-      }
+    if (isToTxn || isExplicitStatsUpdate) {
+      setAcidDdlDesc(alterTblDesc);
     }
 
     rootTasks.add(TaskFactory.get(ddlWork));
@@ -1864,19 +1869,6 @@ private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, Map
-  private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
-      AlterTableTypes op) throws SemanticException {
-    addInputsOutputsAlterTable(tableName, partSpec, (AlterTableDesc) null, op, false);
-  }
-
-  private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
-      AlterTableDesc desc, boolean doForceExclusive) throws SemanticException {
-    addInputsOutputsAlterTable(tableName, partSpec, desc, desc.getOp(), doForceExclusive);
-  }
-
-  private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
-      AlterTableDesc desc) throws SemanticException {
-    addInputsOutputsAlterTable(tableName, partSpec, desc, desc.getOp(), false);
-  }
-
-  private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
-      AlterTableDesc desc, AlterTableTypes op, boolean doForceExclusive) throws SemanticException {
-    boolean isCascade = desc != null && desc.getIsCascade();
-    boolean alterPartitions = partSpec != null && !partSpec.isEmpty();
-    //cascade only occurs at table level then cascade to partition level
-    if (isCascade && alterPartitions) {
-      throw new SemanticException(
-          ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName());
-    }
-
-    Table tab = getTable(tableName, true);
-    // cascade only occurs with partitioned table
-    if (isCascade && !tab.isPartitioned()) {
-      throw new SemanticException(
-          ErrorMsg.ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED);
-    }
-
-    // Determine the lock type to acquire
-    WriteEntity.WriteType writeType = doForceExclusive
-        ? WriteType.DDL_EXCLUSIVE : determineAlterTableWriteType(tab, desc, op);
-
-    if (!alterPartitions) {
-      inputs.add(new ReadEntity(tab));
-      alterTableOutput = new WriteEntity(tab, writeType);
-      outputs.add(alterTableOutput);
-      //do not need the lock for partitions since they are covered by the table lock
-      if (isCascade) {
-        for (Partition part : getPartitions(tab, partSpec, false)) {
-          outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
-        }
-      }
-    } else {
-      ReadEntity re = new ReadEntity(tab);
-      // In the case of altering a table for its partitions we don't need to lock the table
-      // itself, just the partitions. But the table will have a ReadEntity. So mark that
-      // ReadEntity as no lock.
-      re.noLockNeeded();
-      inputs.add(re);
-
-      if (isFullSpec(tab, partSpec)) {
-        // Fully specified partition spec
-        Partition part = getPartition(tab, partSpec, true);
-        outputs.add(new WriteEntity(part, writeType));
-      } else {
-        // Partial partition spec supplied. Make sure this is allowed.
-        if (!AlterTableDesc.doesAlterTableTypeSupportPartialPartitionSpec(op)) {
-          throw new SemanticException(
-              ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName());
-        } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) {
-          throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED);
-        }
-
-        for (Partition part : getPartitions(tab, partSpec, true)) {
-          outputs.add(new WriteEntity(part, writeType));
-        }
-      }
-    }
-
-    if (desc != null) {
-      validateAlterTableType(tab, op, desc.getExpectView());
-
-      // validate Unset Non Existed Table Properties
-      if (op == AlterTableDesc.AlterTableTypes.DROPPROPS && !desc.getIsDropIfExists()) {
-        Map<String, String> tableParams = tab.getTTable().getParameters();
-        for (String currKey : desc.getProps().keySet()) {
-          if (!tableParams.containsKey(currKey)) {
-            String errorMsg =
-                "The following property " + currKey +
-                " does not exist in " + tab.getTableName();
-            throw new SemanticException(
-                ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg));
-          }
-        }
-      }
-    }
-  }
-
   // For the time while all the alter table operations are getting migrated there is a duplication of this method here
   private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
       AbstractAlterTableDesc desc, AlterTableTypes op, boolean doForceExclusive) throws SemanticException {
     boolean isCascade = desc != null && desc.isCascade();
@@ -2029,7 +1929,7 @@ private void addInputsOutputsAlterTable(String tableName, Map pa
         outputs.add(new WriteEntity(part, writeType));
       } else {
         // Partial partition spec supplied. Make sure this is allowed.
-        if (!AlterTableDesc.doesAlterTableTypeSupportPartialPartitionSpec(op)) {
+        if (!AlterTableTypes.SUPPORT_PARTIAL_PARTITION_SPEC.contains(op)) {
           throw new SemanticException(
               ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName());
         } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) {
@@ -2044,20 +1944,6 @@ private void addInputsOutputsAlterTable(String tableName, Map pa
     if (desc != null) {
       validateAlterTableType(tab, op, desc.expectView());
-
-      // validate Unset Non Existed Table Properties
-/*    if (op == AlterTableDesc.AlterTableTypes.DROPPROPS && !desc.getIsDropIfExists()) {
-        Map<String, String> tableParams = tab.getTTable().getParameters();
-        for (String currKey : desc.getProps().keySet()) {
-          if (!tableParams.containsKey(currKey)) {
-            String errorMsg =
-                "The following property " + currKey +
-                " does not exist in " + tab.getTableName();
-            throw new SemanticException(
-                ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg));
-          }
-        }
-      }*/
     }
   }
 
@@ -2072,8 +1958,8 @@ private void analyzeAlterTableOwner(ASTNode ast, String tableName) throws Semant
       throw new SemanticException("Owner name can't be null in alter table set owner command");
     }
 
-    AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, ownerPrincipal);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
+    AlterTableSetOwnerDesc alterTblDesc = new AlterTableSetOwnerDesc(tableName, ownerPrincipal);
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc), conf));
   }
 
   private void analyzeAlterTableLocation(ASTNode ast, String tableName, Map<String, String> partSpec)
@@ -2205,7 +2091,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast,
     mergeDesc.setLbCtx(lbCtx);
 
-    addInputsOutputsAlterTable(tableName, partSpec, AlterTableTypes.MERGEFILES);
+    addInputsOutputsAlterTable(tableName, partSpec, null, AlterTableTypes.MERGEFILES, false);
     DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc);
     ddlWork.setNeedLock(true);
     Task<? extends Serializable> mergeTask = TaskFactory.get(ddlWork);
@@ -3251,14 +3137,13 @@ private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expec
     String sourceName = getDotName(source);
     String targetName = getDotName(target);
 
-    AlterTableDesc alterTblDesc = new AlterTableDesc(sourceName, targetName, expectView, null);
+    AlterTableRenameDesc alterTblDesc = new AlterTableRenameDesc(sourceName, null, expectView, targetName);
     Table table = getTable(sourceName, true);
     if (AcidUtils.isTransactionalTable(table)) {
       setAcidDdlDesc(alterTblDesc);
     }
-    addInputsOutputsAlterTable(sourceName, null, alterTblDesc);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        alterTblDesc)));
+    addInputsOutputsAlterTable(sourceName, null, alterTblDesc, alterTblDesc.getType(), false);
+    rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc)));
   }
 
   private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTNode ast,
@@ -3777,7 +3662,7 @@ private void analyzeAlterTableTouch(String[] qualified, CommonTree ast)
     if (partSpecs.size() == 0) {
       AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
           getDotName(qualified), null,
-          AlterTableDesc.AlterTableTypes.TOUCH);
+          AlterTableTypes.TOUCH);
       outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
@@ -3786,7 +3671,7 @@ private void analyzeAlterTableTouch(String[] qualified, CommonTree ast)
       for (Map<String, String> partSpec : partSpecs) {
         AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
             getDotName(qualified), partSpec,
-            AlterTableDesc.AlterTableTypes.TOUCH);
+            AlterTableTypes.TOUCH);
         rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
       }
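The SET and UNSET descriptors deliberately differ by one flag: only the SET path carries isAcidConversion, so the UNSET constructor takes seven arguments, matching the call sites in this patch (including setStatsForNonNativeTable() below). Note also that the unset-nonexistent-property check now runs at analysis time rather than in DDLTask. A sketch of the unset side; the table and key are illustrative:

    // Sketch: an UNSET TBLPROPERTIES descriptor. Argument order mirrors the call
    // sites in this patch: table name, partition spec, replication spec, expectView,
    // properties, isExplicitStatsUpdate, environment context - no isAcidConversion.
    Map<String, String> toUnset = new HashMap<>();
    toUnset.put("comment", null);
    AlterTableUnsetPropertiesDesc unsetDesc = new AlterTableUnsetPropertiesDesc(
        "default.sample_table", null, null, false, toUnset, false, null);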
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 027ef93fc9..f55dce3988 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -103,6 +103,7 @@
 import org.apache.hadoop.hive.ql.ddl.DDLWork2;
 import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc;
 import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableLikeDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableUnsetPropertiesDesc;
 import org.apache.hadoop.hive.ql.ddl.table.misc.PreInsertTableDesc;
 import org.apache.hadoop.hive.ql.ddl.view.CreateViewDesc;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
@@ -195,8 +196,6 @@
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowSpec;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowType;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -6948,13 +6947,11 @@ private void genPartnCols(String dest, Operator input, QB qb,
   private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException {
     String qTableName = DDLSemanticAnalyzer.getDotName(new String[] { dbName, tableName });
-    AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, null, false);
     HashMap<String, String> mapProp = new HashMap<>();
     mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null);
-    alterTblDesc.setOldName(qTableName);
-    alterTblDesc.setProps(mapProp);
-    alterTblDesc.setDropIfExists(true);
-    this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
+    AlterTableUnsetPropertiesDesc alterTblDesc = new AlterTableUnsetPropertiesDesc(qTableName, null, null, false,
+        mapProp, false, null);
+    this.rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc)));
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
index 53d998200c..ee6ee8b6a6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
@@ -19,13 +19,13 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableRenameDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
 
 import java.io.Serializable;
@@ -66,11 +66,10 @@
       if (ReplUtils.isTableMigratingToTransactional(context.hiveConf, tableObjAfter)) {
         replicationSpec.setMigratingToTxnTable();
       }
-      AlterTableDesc renameTableDesc = new AlterTableDesc(
-          oldName, newName, false, replicationSpec);
+      AlterTableRenameDesc renameTableDesc = new AlterTableRenameDesc(oldName, replicationSpec, false, newName);
       renameTableDesc.setWriteId(msg.getWriteId());
-      Task<DDLWork> renameTableTask = TaskFactory.get(
-          new DDLWork(readEntitySet, writeEntitySet, renameTableDesc), context.hiveConf);
+      Task<DDLWork2> renameTableTask = TaskFactory.get(
+          new DDLWork2(readEntitySet, writeEntitySet, renameTableDesc), context.hiveConf);
       context.log.debug("Added rename table task : {}:{}->{}",
           renameTableTask.getId(), oldName, newName);
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
deleted file mode 100644
index 9aa7e73fa7..0000000000
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * AlterTableDesc.
- *
- */
-@Explain(displayName = "Alter Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterTableDesc extends DDLDesc implements Serializable, DDLDesc.DDLDescWithWriteId {
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * alterTableTypes.
-   *
-   */
-  public static enum AlterTableTypes {
-    RENAME("rename"), ADD_COLUMNS("add columns"), REPLACE_COLUMNS("replace columns"),
-    ADDPROPS("add props"), DROPPROPS("drop props"), SET_SERDE("set serde"), SET_SERDE_PROPS("set serde props"),
-    SET_FILE_FORMAT("add fileformat"), CLUSTERED_BY("clustered by"), NOT_SORTED("not sorted"),
-    NOT_CLUSTERED("not clustered"),
-    RENAME_COLUMN("rename column"), ADDPARTITION("add partition"), TOUCH("touch"), ARCHIVE("archieve"),
-    UNARCHIVE("unarchieve"), SET_LOCATION("set location"),
-    DROPPARTITION("drop partition"),
-    RENAMEPARTITION("rename partition"), // Note: used in RenamePartitionDesc, not here.
-    SKEWED_BY("skewed by"), NOT_SKEWED("not skewed"),
-    SET_SKEWED_LOCATION("alter skew location"), INTO_BUCKETS("alter bucket number"),
-    ALTERPARTITION("alter partition"), // Note: this is never used in AlterTableDesc.
-    COMPACT("compact"),
-    TRUNCATE("truncate"), MERGEFILES("merge files"), DROP_CONSTRAINT("drop constraint"),
-    ADD_CONSTRAINT("add constraint"),
-    UPDATE_COLUMNS("update columns"), OWNER("set owner"),
-    UPDATESTATS("update stats"); // Note: used in ColumnStatsUpdateWork, not here.
-    ;
-
-    private final String name;
-    private AlterTableTypes(String name) { this.name = name; }
-    public String getName() { return name; }
-
-    public static final List<AlterTableTypes> nonNativeTableAllowedTypes =
-        ImmutableList.of(ADDPROPS, DROPPROPS, ADD_COLUMNS);
-  }
-
-  public static enum ProtectModeType {
-    NO_DROP, OFFLINE, READ_ONLY, NO_DROP_CASCADE
-  }
-
-  public static final Set<AlterTableTypes> alterTableTypesWithPartialSpec =
-      new HashSet<AlterTableTypes>();
-
-  static {
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADD_COLUMNS);
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.REPLACE_COLUMNS);
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.RENAME_COLUMN);
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDPROPS);
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.DROPPROPS);
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.SET_SERDE);
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.SET_SERDE_PROPS);
-    alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.SET_FILE_FORMAT);
-  }
-
-  AlterTableTypes op;
-  String oldName;
-  String newName;
-  Map<String, String> props;
-
-  boolean expectView;
-  HashMap<String, String> partSpec;
-  boolean protectModeEnable;
-  ProtectModeType protectModeType;
-  boolean isTurnOffSkewed = false;
-  boolean isDropIfExists = false;
-  boolean isCascade = false;
-  EnvironmentContext environmentContext;
-  ReplicationSpec replicationSpec;
-  private Long writeId = null;
-  PrincipalDesc ownerPrincipal;
-  private boolean isExplicitStatsUpdate, isFullAcidConversion;
-
-  public AlterTableDesc() {
-  }
-
-  /**
-   * @param oldName
-   *          old name of the table
-   * @param newName
-   *          new name of the table
-   * @param expectView
-   *          Flag to denote if current table can be a view
-   * @param replicationSpec
-   *          Replication specification with current event ID
-   * @throws SemanticException
-   */
-  public AlterTableDesc(String oldName, String newName, boolean expectView, ReplicationSpec replicationSpec) throws SemanticException {
-    op = AlterTableTypes.RENAME;
-    setOldName(oldName);
-    this.newName = newName;
-    this.expectView = expectView;
-    this.replicationSpec = replicationSpec;
-  }
-
-  /**
-   * @param alterType
-   *          type of alter op
-   * @param replicationSpec
-   *          Replication specification with current event ID
-   */
-  public AlterTableDesc(AlterTableTypes alterType, ReplicationSpec replicationSpec) {
-    op = alterType;
-    this.replicationSpec = replicationSpec;
-  }
-
-  /**
-   * @param alterType
-   *          type of alter op
-   */
-  public AlterTableDesc(AlterTableTypes alterType) {
-    this(alterType, null, false);
-  }
-
-  /**
-   * @param alterType
-   *          type of alter op
-   * @param expectView
-   *          Flag to denote if current table can be a view
-   * @param partSpec
-   *          Partition specifier with map of key and values.
-   */
-  public AlterTableDesc(AlterTableTypes alterType, HashMap<String, String> partSpec, boolean expectView) {
-    op = alterType;
-    this.partSpec = partSpec;
-    this.expectView = expectView;
-  }
-
-  public AlterTableDesc(String tableName, PrincipalDesc ownerPrincipal) {
-    op = AlterTableTypes.OWNER;
-    this.oldName = tableName;
-    this.ownerPrincipal = ownerPrincipal;
-  }
-
-  /**
-   * @param ownerPrincipal the owner principal of the table
-   */
-  public void setOwnerPrincipal(PrincipalDesc ownerPrincipal) {
-    this.ownerPrincipal = ownerPrincipal;
-  }
-
-  @Explain(displayName="owner")
-  public PrincipalDesc getOwnerPrincipal() {
-    return this.ownerPrincipal;
-  }
-
-  @Explain(displayName = "type", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getAlterTableTypeString() {
-    return op.getName();
-  }
-
-  /**
-   * @return the old name of the table
-   */
-  @Explain(displayName = "old name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getOldName() {
-    return oldName;
-  }
-
-  /**
-   * @param oldName
-   *          the oldName to set
-   */
-  public void setOldName(String oldName) throws SemanticException {
-    // Make sure we qualify the name from the outset so there's no ambiguity.
-    this.oldName = String.join(".", Utilities.getDbTableName(oldName));
-  }
-
-  /**
-   * @return the newName
-   */
-  @Explain(displayName = "new name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getNewName() {
-    return newName;
-  }
-
-  /**
-   * @param newName
-   *          the newName to set
-   */
-  public void setNewName(String newName) {
-    this.newName = newName;
-  }
-
-  /**
-   * @return the op
-   */
-  public AlterTableTypes getOp() {
-    return op;
-  }
-
-  /**
-   * @param op
-   *          the op to set
-   */
-  public void setOp(AlterTableTypes op) {
-    this.op = op;
-  }
-
-  /**
-   * @return the props
-   */
-  @Explain(displayName = "properties")
-  public Map<String, String> getProps() {
-    return props;
-  }
-
-  /**
-   * @param props
-   *          the props to set
-   */
-  public void setProps(Map<String, String> props) {
-    this.props = props;
-  }
-
-  /**
-   * @return whether to expect a view being altered
-   */
-  public boolean getExpectView() {
-    return expectView;
-  }
-
-  /**
-   * @param expectView
-   *          set whether to expect a view being altered
-   */
-  public void setExpectView(boolean expectView) {
-    this.expectView = expectView;
-  }
-
-  /**
-   * @return part specification
-   */
-  public HashMap<String, String> getPartSpec() {
-    return partSpec;
-  }
-
-  /**
-   * @param partSpec
-   */
-  public void setPartSpec(HashMap<String, String> partSpec) {
-    this.partSpec = partSpec;
-  }
-
-  public boolean isProtectModeEnable() {
-    return protectModeEnable;
-  }
-
-  public void setProtectModeEnable(boolean protectModeEnable) {
-    this.protectModeEnable = protectModeEnable;
-  }
-
-  public ProtectModeType getProtectModeType() {
-    return protectModeType;
-  }
-
-  public void setProtectModeType(ProtectModeType protectModeType) {
-    this.protectModeType = protectModeType;
-  }
-
-  /**
-   * @return the turnOffSkewed
-   */
-  public boolean isTurnOffSkewed() {
-    return isTurnOffSkewed;
-  }
-
-  /**
-   * @param turnOffSkewed the turnOffSkewed to set
-   */
-  public void setTurnOffSkewed(boolean turnOffSkewed) {
-    this.isTurnOffSkewed = turnOffSkewed;
-  }
-
-  /**
-   * @param isDropIfExists the isDropIfExists to set
-   */
-  public void setDropIfExists(boolean isDropIfExists) {
-    this.isDropIfExists = isDropIfExists;
-  }
-
-  /**
-   * @return isDropIfExists
-   */
-  public boolean getIsDropIfExists() {
-    return isDropIfExists;
-  }
-
-  /**
-   * @return isCascade
-   */
-  public boolean getIsCascade() {
-    return isCascade;
-  }
-
-  /**
-   * @param isCascade the isCascade to set
-   */
-  public void setIsCascade(boolean isCascade) {
-    this.isCascade = isCascade;
-  }
-
-  public static boolean doesAlterTableTypeSupportPartialPartitionSpec(AlterTableTypes type) {
-    return alterTableTypesWithPartialSpec.contains(type);
-  }
-
-  public EnvironmentContext getEnvironmentContext() {
-    return environmentContext;
-  }
-
-  public void setEnvironmentContext(EnvironmentContext environmentContext) {
-    this.environmentContext = environmentContext;
-  }
-
-  /**
-   * @return what kind of replication scope this alter is running under.
-   * This can result in a "ALTER IF NEWER THAN" kind of semantic
-   */
-  public ReplicationSpec getReplicationSpec(){ return this.replicationSpec; }
-
-  @Override
-  public void setWriteId(long writeId) {
-    this.writeId = writeId;
-  }
-
-  @Override
-  public String getFullTableName() {
-    return getOldName();
-  }
-
-  @Override
-  public boolean mayNeedWriteId() {
-    switch (getOp()) {
-    case ADDPROPS: {
-      return isExplicitStatsUpdate || AcidUtils.isToInsertOnlyTable(null, getProps())
-          || (AcidUtils.isTransactionalTable(getProps()) && !isFullAcidConversion);
-    }
-    case DROPPROPS: return isExplicitStatsUpdate;
-    // The check for the following ones is performed before setting AlterTableDesc into the acid field.
-    // These need write ID and stuff because they invalidate column stats.
-    case RENAME_COLUMN:
-    case RENAME:
-    case REPLACE_COLUMNS:
-    case ADD_COLUMNS:
-    case SET_LOCATION:
-    case UPDATE_COLUMNS: return true;
-    // RENAMEPARTITION is handled in RenamePartitionDesc
-    default: return false;
-    }
-  }
-
-  public Long getWriteId() {
-    return this.writeId;
-  }
-
-  @Override
-  public String toString() {
-    return this.getClass().getSimpleName() + " for " + getFullTableName();
-  }
-
-  public void setIsExplicitStatsUpdate(boolean b) {
-    this.isExplicitStatsUpdate = b;
-  }
-
-  public void setIsFullAcidConversion(boolean b) {
-    this.isFullAcidConversion = b;
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java
index 759a14f95c..8205901285 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java
@@ -22,7 +22,7 @@
 import java.util.LinkedHashMap;
 import java.util.Map;
 
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableTypes;
 
 /**
  * Contains information needed to modify a partition or a table
@@ -78,11 +78,11 @@ public void setTableName(String tableName) {
     this.tableName = tableName;
   }
 
-  public AlterTableDesc.AlterTableTypes getType() {
+  public AlterTableTypes getType() {
     return type;
   }
 
-  public void setType(AlterTableDesc.AlterTableTypes type) {
+  public void setType(AlterTableTypes type) {
     this.type = type;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 07feae32e7..477dd080a8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -34,7 +34,6 @@
   // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates.
   private InsertCommitHookDesc insertCommitHookDesc;
 
-  private AlterTableDesc alterTblDesc;
   private AlterTableSimpleDesc alterTblSimpleDesc;
   private MsckDesc msckDesc;
@@ -69,16 +68,6 @@ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
     this.showConfDesc = showConfDesc;
   }
 
-  /**
-   * @param alterTblDesc
-   *          alter table descriptor
-   */
-  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      AlterTableDesc alterTblDesc) {
-    this(inputs, outputs);
-    this.alterTblDesc = alterTblDesc;
-  }
-
   /**
    * @param inputs
    * @param outputs
@@ -123,14 +112,6 @@ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
     this.replSetFirstIncLoadFlagDesc = replSetFirstIncLoadFlagDesc;
   }
 
-  /**
-   * @return the alterTblDesc
-   */
-  @Explain(displayName = "Alter Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public AlterTableDesc getAlterTblDesc() {
-    return alterTblDesc;
-  }
-
   /**
    * @return information about the table/partitions we want to alter.
    */
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index e33f5e43ec..646b7c1620 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -44,8 +44,8 @@
   public String ALTER_TABLE_OPERATION_TYPE = "alterTableOpType";
 
-  // These should remain in sync with AlterTableDesc::AlterTableType enum
-  public List<String> allowedAlterTypes = ImmutableList.of("ADDPROPS", "DROPPROPS");
+  // These should remain in sync with AlterTableType enum
+  public List<String> allowedAlterTypes = ImmutableList.of("SET_PROPERTIES", "UNSET_PROPERTIES");
   String SET_LOCATION = "SET_LOCATION";
 
   /**
@@ -114,7 +114,7 @@ public void commitDropTable(Table table, boolean deleteData)
   public default void preAlterTable(Table table, EnvironmentContext context) throws MetaException {
     String alterOpType = (context == null || context.getProperties() == null) ? null :
         context.getProperties().get(ALTER_TABLE_OPERATION_TYPE);
-    // By default allow only ADDPROPS and DROPPROPS.
+    // By default allow only SET_PROPERTIES and UNSET_PROPERTIES.
     // alterOpType is null in case of stats update.
     if (alterOpType != null && !allowedAlterTypes.contains(alterOpType)){
       throw new MetaException(
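Closing note for storage-handler authors: the default preAlterTable() above now keys on the new enum names, so any hook that overrides the whitelist must use them as well. A hedged sketch of such an override; the message text is illustrative, and the other HiveMetaHook callbacks required by the interface are not spelled out here:

    // Sketch: widening the alter-type whitelist using the renamed constants.
    private static final List<String> ALLOWED_ALTER_TYPES =
        ImmutableList.of("SET_PROPERTIES", "UNSET_PROPERTIES", "ADD_COLUMNS");

    @Override
    public void preAlterTable(Table table, EnvironmentContext context) throws MetaException {
      String alterOpType = (context == null || context.getProperties() == null)
          ? null : context.getProperties().get(ALTER_TABLE_OPERATION_TYPE);
      // alterOpType is null for stats updates; reject anything else not whitelisted.
      if (alterOpType != null && !ALLOWED_ALTER_TYPES.contains(alterOpType)) {
        throw new MetaException("ALTER TABLE " + alterOpType + " is not supported by this storage handler");
      }
    }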