diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
index 0acd5011cc..6a28ef0c7a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
@@ -21,31 +21,17 @@
 import java.util.Map;
 
 import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
-import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 /**
- * Abstract ancestor of all Alter Table analyzer. The alter table commands have this structure:
+ * Abstract ancestor of all Alter Table analyzers that handle commands with this structure:
  *   tableName command partitionSpec?
  */
-public abstract class AbstractAlterTableAnalyzer extends BaseSemanticAnalyzer {
-  // Equivalent to acidSinks, but for DDL operations that change data.
-  private DDLDescWithWriteId ddlDescWithWriteId;
+public abstract class AbstractAlterTableAnalyzer extends AbstractBaseAlterTableAnalyzer {
 
   public AbstractAlterTableAnalyzer(QueryState queryState) throws SemanticException {
     super(queryState);
@@ -75,122 +61,4 @@ public void analyzeInternal(ASTNode root) throws SemanticException {
 
   protected abstract void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
       throws SemanticException;
-
-  protected void setAcidDdlDesc(DDLDescWithWriteId descWithWriteId) {
-    if(this.ddlDescWithWriteId != null) {
-      throw new IllegalStateException("ddlDescWithWriteId is already set: " + this.ddlDescWithWriteId);
-    }
-    this.ddlDescWithWriteId = descWithWriteId;
-  }
-
-  @Override
-  public DDLDescWithWriteId getAcidDdlDesc() {
-    return ddlDescWithWriteId;
-  }
-
-  protected void addInputsOutputsAlterTable(TableName tableName, Map<String, String> partitionSpec,
-      AbstractAlterTableDesc desc, AlterTableType op, boolean doForceExclusive) throws SemanticException {
-    boolean isCascade = desc != null && desc.isCascade();
-    boolean alterPartitions = partitionSpec != null && !partitionSpec.isEmpty();
-    //cascade only occurs at table level then cascade to partition level
-    if (isCascade && alterPartitions) {
-      throw new SemanticException(ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName());
-    }
-
-    Table table = getTable(tableName, true);
-    // cascade only occurs with partitioned table
-    if (isCascade && !table.isPartitioned()) {
-      throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED);
-    }
-
-    // Determine the lock type to acquire
-    WriteEntity.WriteType writeType = doForceExclusive ?
-        WriteType.DDL_EXCLUSIVE : determineAlterTableWriteType(table, desc, op);
-
-    if (!alterPartitions) {
-      inputs.add(new ReadEntity(table));
-      WriteEntity alterTableOutput = new WriteEntity(table, writeType);
-      outputs.add(alterTableOutput);
-      //do not need the lock for partitions since they are covered by the table lock
-      if (isCascade) {
-        for (Partition part : PartitionUtils.getPartitions(db, table, partitionSpec, false)) {
-          outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
-        }
-      }
-    } else {
-      ReadEntity re = new ReadEntity(table);
-      // In the case of altering a table for its partitions we don't need to lock the table
-      // itself, just the partitions. But the table will have a ReadEntity. So mark that
-      // ReadEntity as no lock.
-      re.noLockNeeded();
-      inputs.add(re);
-
-      if (AlterTableUtils.isFullPartitionSpec(table, partitionSpec)) {
-        // Fully specified partition spec
-        Partition part = PartitionUtils.getPartition(db, table, partitionSpec, true);
-        outputs.add(new WriteEntity(part, writeType));
-      } else {
-        // Partial partition spec supplied. Make sure this is allowed.
-        if (!AlterTableType.SUPPORT_PARTIAL_PARTITION_SPEC.contains(op)) {
-          throw new SemanticException(
-              ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName());
-        } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) {
-          throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED);
-        }
-
-        for (Partition part : PartitionUtils.getPartitions(db, table, partitionSpec, true)) {
-          outputs.add(new WriteEntity(part, writeType));
-        }
-      }
-    }
-
-    if (desc != null) {
-      validateAlterTableType(table, op, desc.expectView());
-    }
-  }
-
-  // For the time while all the alter table operations are getting migrated there is a duplication of this method here
-  private WriteType determineAlterTableWriteType(Table table, AbstractAlterTableDesc desc, AlterTableType op) {
-    boolean convertingToAcid = false;
-    if (desc != null && desc.getProps() != null &&
-        Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) {
-      convertingToAcid = true;
-    }
-    if (!AcidUtils.isTransactionalTable(table) && convertingToAcid) {
-      // non-acid to transactional conversion (property itself) must be mutexed to prevent concurrent writes.
-      // See HIVE-16688 for use cases.
-      return WriteType.DDL_EXCLUSIVE;
-    }
-    return WriteEntity.determineAlterTableWriteType(op);
-  }
-
-  protected void validateAlterTableType(Table table, AlterTableType op, boolean expectView)
-      throws SemanticException {
-    if (table.isView()) {
-      if (!expectView) {
-        throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg());
-      }
-
-      switch (op) {
-      case ADDPARTITION:
-      case DROPPARTITION:
-      case RENAMEPARTITION:
-      case ADDPROPS:
-      case DROPPROPS:
-      case RENAME:
-        // allow this form
-        break;
-      default:
-        throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString()));
-      }
-    } else {
-      if (expectView) {
-        throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
-      }
-    }
-    if (table.isNonNative() && !AlterTableType.NON_NATIVE_TABLE_ALLOWED.contains(op)) {
-      throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.format(
-          AlterTableType.NON_NATIVE_TABLE_ALLOWED.toString(), table.getTableName()));
-    }
-  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
new file mode 100644
index 0000000000..dcf484260e
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Abstract ancestor of all Alter Table analyzers.
+ */
+public abstract class AbstractBaseAlterTableAnalyzer extends BaseSemanticAnalyzer {
+  // Equivalent to acidSinks, but for DDL operations that change data.
+  private DDLDescWithWriteId ddlDescWithWriteId;
+
+  public AbstractBaseAlterTableAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  protected void setAcidDdlDesc(DDLDescWithWriteId descWithWriteId) {
+    if(this.ddlDescWithWriteId != null) {
+      throw new IllegalStateException("ddlDescWithWriteId is already set: " + this.ddlDescWithWriteId);
+    }
+    this.ddlDescWithWriteId = descWithWriteId;
+  }
+
+  @Override
+  public DDLDescWithWriteId getAcidDdlDesc() {
+    return ddlDescWithWriteId;
+  }
+
+  protected void addInputsOutputsAlterTable(TableName tableName, Map<String, String> partitionSpec,
+      AbstractAlterTableDesc desc, AlterTableType op, boolean doForceExclusive) throws SemanticException {
+    boolean isCascade = desc != null && desc.isCascade();
+    boolean alterPartitions = partitionSpec != null && !partitionSpec.isEmpty();
+    //cascade only occurs at table level then cascade to partition level
+    if (isCascade && alterPartitions) {
+      throw new SemanticException(ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName());
+    }
+
+    Table table = getTable(tableName, true);
+    // cascade only occurs with partitioned table
+    if (isCascade && !table.isPartitioned()) {
+      throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED);
+    }
+
+    // Determine the lock type to acquire
+    WriteEntity.WriteType writeType = doForceExclusive ?
+        WriteType.DDL_EXCLUSIVE : determineAlterTableWriteType(table, desc, op);
+
+    if (!alterPartitions) {
+      inputs.add(new ReadEntity(table));
+      WriteEntity alterTableOutput = new WriteEntity(table, writeType);
+      outputs.add(alterTableOutput);
+      //do not need the lock for partitions since they are covered by the table lock
+      if (isCascade) {
+        for (Partition part : PartitionUtils.getPartitions(db, table, partitionSpec, false)) {
+          outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
+        }
+      }
+    } else {
+      ReadEntity re = new ReadEntity(table);
+      // In the case of altering a table for its partitions we don't need to lock the table
+      // itself, just the partitions. But the table will have a ReadEntity. So mark that
+      // ReadEntity as no lock.
+      re.noLockNeeded();
+      inputs.add(re);
+
+      if (AlterTableUtils.isFullPartitionSpec(table, partitionSpec)) {
+        // Fully specified partition spec
+        Partition part = PartitionUtils.getPartition(db, table, partitionSpec, true);
+        outputs.add(new WriteEntity(part, writeType));
+      } else {
+        // Partial partition spec supplied. Make sure this is allowed.
+        if (!AlterTableType.SUPPORT_PARTIAL_PARTITION_SPEC.contains(op)) {
+          throw new SemanticException(
+              ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName());
+        } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) {
+          throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED);
+        }
+
+        for (Partition part : PartitionUtils.getPartitions(db, table, partitionSpec, true)) {
+          outputs.add(new WriteEntity(part, writeType));
+        }
+      }
+    }
+
+    if (desc != null) {
+      validateAlterTableType(table, op, desc.expectView());
+    }
+  }
+
+  // For the time while all the alter table operations are getting migrated there is a duplication of this method here
+  private WriteType determineAlterTableWriteType(Table table, AbstractAlterTableDesc desc, AlterTableType op) {
+    boolean convertingToAcid = false;
+    if (desc != null && desc.getProps() != null &&
+        Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) {
+      convertingToAcid = true;
+    }
+    if (!AcidUtils.isTransactionalTable(table) && convertingToAcid) {
+      // non-acid to transactional conversion (property itself) must be mutexed to prevent concurrent writes.
+      // See HIVE-16688 for use cases.
+      return WriteType.DDL_EXCLUSIVE;
+    }
+    return WriteEntity.determineAlterTableWriteType(op);
+  }
+
+  protected void validateAlterTableType(Table table, AlterTableType op, boolean expectView)
+      throws SemanticException {
+    if (table.isView()) {
+      if (!expectView) {
+        throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg());
+      }
+
+      switch (op) {
+      case ADDPARTITION:
+      case DROPPARTITION:
+      case RENAMEPARTITION:
+      case ADDPROPS:
+      case DROPPROPS:
+      case RENAME:
+        // allow this form
+        break;
+      default:
+        throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString()));
+      }
+    } else {
+      if (expectView) {
+        throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
+      }
+    }
+    if (table.isNonNative() && !AlterTableType.NON_NATIVE_TABLE_ALLOWED.contains(op)) {
+      throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.format(
+          AlterTableType.NON_NATIVE_TABLE_ALLOWED.toString(), table.getTableName()));
+    }
+  }
+}
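Note: the extracted AbstractBaseAlterTableAnalyzer keeps the write-id bookkeeping and the lock-type selection in one place. A minimal sketch (not part of the patch) of how a concrete analyzer is expected to plug into it; the analyzer and desc names are hypothetical, and AlterTableType.ADDPROPS merely stands in for a real operation type:

    // Hypothetical subclass, for illustration only.
    public class MyAlterTableExampleAnalyzer extends AbstractAlterTableAnalyzer {
      public MyAlterTableExampleAnalyzer(QueryState queryState) throws SemanticException {
        super(queryState);
      }

      @Override
      protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
          throws SemanticException {
        MyExampleDesc desc = new MyExampleDesc(tableName, partitionSpec); // hypothetical desc class
        // Registers the read/write entities; the base class picks the lock type via
        // determineAlterTableWriteType unless doForceExclusive is set.
        addInputsOutputsAlterTable(tableName, partitionSpec, desc, AlterTableType.ADDPROPS, false);
        rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
      }
    }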
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/columnstats/AlterTableUpdateColumnStatistictAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/columnstats/AlterTableUpdateColumnStatistictAnalyzer.java
new file mode 100644
index 0000000000..0ae0a1ab5f
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/columnstats/AlterTableUpdateColumnStatistictAnalyzer.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.columnstats;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
+
+/**
+ * Analyzer for update column statistics commands.
+ */
+@DDLType(types = {HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS, HiveParser.TOK_ALTERPARTITION_UPDATECOLSTATS})
+public class AlterTableUpdateColumnStatistictAnalyzer extends AbstractAlterTableAnalyzer {
+  public AlterTableUpdateColumnStatistictAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    Table table = getTable(tableName);
+    String columnName = getUnescapedName((ASTNode) command.getChild(0));
+    Map<String, String> properties = getProps((ASTNode) (command.getChild(1)).getChild(0));
+
+    String partitionName = getPartitionName(partitionSpec);
+    String columnType = getColumnType(table, columnName);
+
+    ColumnStatsUpdateWork work = new ColumnStatsUpdateWork(partitionName, properties, table.getDbName(),
+        table.getTableName(), columnName, columnType);
+    ColumnStatsUpdateTask task = (ColumnStatsUpdateTask) TaskFactory.get(work);
+    // TODO: doesn't look like this path is actually ever exercised. Maybe this needs to be removed.
+    addInputsOutputsAlterTable(tableName, partitionSpec, null, AlterTableType.UPDATESTATS, false);
+    if (AcidUtils.isTransactionalTable(table)) {
+      setAcidDdlDesc(work);
+    }
+
+    rootTasks.add(task);
+  }
+
+  private String getPartitionName(Map<String, String> partitionSpec) throws SemanticException {
+    String partitionName = null;
+    if (partitionSpec != null) {
+      try {
+        partitionName = Warehouse.makePartName(partitionSpec, false);
+      } catch (MetaException e) {
+        throw new SemanticException("partition " + partitionSpec.toString() + " not found");
+      }
+    }
+    return partitionName;
+  }
+
+  private String getColumnType(Table table, String columnName) throws SemanticException {
+    for (FieldSchema column : table.getCols()) {
+      if (columnName.equalsIgnoreCase(column.getName())) {
+        return column.getType();
+      }
+    }
+
+    throw new SemanticException("column type not found for column " + columnName);
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerAnalyzer.java
new file mode 100644
index 0000000000..2da0ee1dc0
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerAnalyzer.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.owner;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils;
+
+/**
+ * Analyzer for set owner commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERTABLE_OWNER)
+public class AlterTableSetOwnerAnalyzer extends AbstractAlterTableAnalyzer {
+  public AlterTableSetOwnerAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework, ASTNode command)
+      throws SemanticException {
+    PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) command.getChild(0));
+
+    if (ownerPrincipal.getType() == null) {
+      throw new SemanticException("Owner type can't be null in alter table set owner command");
+    }
+
+    if (ownerPrincipal.getName() == null) {
+      throw new SemanticException("Owner name can't be null in alter table set owner command");
+    }
+
+    AlterTableSetOwnerDesc desc = new AlterTableSetOwnerDesc(tableName, ownerPrincipal);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
+  }
+}
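Note: @DDLType is how DDLSemanticAnalyzerFactory wires a parser token to its analyzer. A simplified sketch of the lookup the factory presumably performs, under the assumption that it collects annotated classes at startup (the actual factory code may differ):

    Map<Integer, Class<? extends BaseSemanticAnalyzer>> analyzers = new HashMap<>();
    for (Class<? extends BaseSemanticAnalyzer> clazz : annotatedAnalyzerClasses) { // hypothetical scan result
      for (int tokenType : clazz.getAnnotation(DDLType.class).types()) {
        analyzers.put(tokenType, clazz); // e.g. TOK_ALTERTABLE_OWNER -> AlterTableSetOwnerAnalyzer
      }
    }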
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerDesc.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerDesc.java
index 1fb11ce7c3..2eb8f99e05 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.owner;
 
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerOperation.java
similarity index 96%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerOperation.java
index 02b9b91dfc..3f165c60cf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.owner;
 
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTablePropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTablePropertiesAnalyzer.java
new file mode 100644
index 0000000000..4a92098370
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTablePropertiesAnalyzer.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
+
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.HiveOperation;
+
+/**
+ * Analyzer for setting/unsetting the properties of table like entities commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERTABLE_PROPERTIES)
+public abstract class AbstractAlterTablePropertiesAnalyzer extends AbstractAlterTableAnalyzer {
+  public AbstractAlterTablePropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    Map<String, String> properties = getProps((ASTNode) (command.getChild(0)).getChild(0));
+
+    boolean updateStats = validate(tableName, properties);
+
+    EnvironmentContext environmentContext = null;
+    if (updateStats) {
+      environmentContext = new EnvironmentContext();
+      environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER);
+    }
+
+    boolean isToTxn = AcidUtils.isTablePropertyTransactional(properties) ||
+        properties.containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    boolean isExplicitStatsUpdate = updateStats && AcidUtils.isTransactionalTable(getTable(tableName, true));
+
+    AbstractAlterTableDesc desc = createDesc(command, tableName, partitionSpec, properties, isToTxn,
+        isExplicitStatsUpdate, environmentContext);
+
+    addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), isToTxn);
+    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), desc);
+    if (isToTxn) {
+      ddlWork.setNeedLock(true); // Hmm... why don't many other operations here need locks?
+    }
+    if (isToTxn || isExplicitStatsUpdate) {
+      setAcidDdlDesc(desc);
+    }
+
+    rootTasks.add(TaskFactory.get(ddlWork));
+  }
+
+  /**
+   * @return true if the command performs an explicit statistics update.
+   */
+  private boolean validate(TableName tableName, Map<String, String> properties) throws SemanticException {
+    // We need to check if the properties are valid, especially for stats.
+    // They might be changed via alter table .. update statistics or alter table .. set tblproperties.
+    // Only row_count and raw_data_size may be changed through update statistics.
+    boolean changeStats = false;
+    for (Entry<String, String> entry : properties.entrySet()) {
+      // we make sure that we do not change anything if there is anything wrong.
+      if (entry.getKey().equals(StatsSetupConst.ROW_COUNT) || entry.getKey().equals(StatsSetupConst.RAW_DATA_SIZE)) {
+        try {
+          Long.parseLong(entry.getValue());
+          changeStats = true;
+        } catch (Exception e) {
+          throw new SemanticException("AlterTable " + entry.getKey() + " failed with value " + entry.getValue());
+        }
+      } else if (entry.getKey().equals("external") && entry.getValue().equals("true")) {
+        // if table is being modified to be external we need to make sure existing table
+        // doesn't have enabled constraint since constraints are disallowed with such tables
+        if (hasConstraintsEnabled(tableName.getTable())) {
+          throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg(String.format(
+              "Table: %s has constraints enabled. Please remove those constraints to change this property.",
+              tableName.getNotEmptyDbTable())));
+        }
+      } else {
+        if (queryState.getCommandType().equals(HiveOperation.ALTERTABLE_UPDATETABLESTATS.getOperationName()) ||
+            queryState.getCommandType().equals(HiveOperation.ALTERTABLE_UPDATEPARTSTATS.getOperationName())) {
+          throw new SemanticException(String.format(
+              "AlterTable UpdateStats %s failed because the only valid keys are %s and %s",
+              entry.getKey(), StatsSetupConst.ROW_COUNT, StatsSetupConst.RAW_DATA_SIZE));
+        }
+      }
+    }
+    return changeStats;
+  }
+
+  private boolean hasConstraintsEnabled(String tableName) throws SemanticException {
+    NotNullConstraint notNullConstraint = null;
+    DefaultConstraint defaultConstraint = null;
+    try {
+      // retrieve enabled NOT NULL and DEFAULT constraints from the metastore
+      notNullConstraint = Hive.get().getEnabledNotNullConstraints(db.getDatabaseCurrent().getName(), tableName);
+      defaultConstraint = Hive.get().getEnabledDefaultConstraints(db.getDatabaseCurrent().getName(), tableName);
+    } catch (Exception e) {
+      if (e instanceof SemanticException) {
+        throw (SemanticException) e;
+      } else {
+        throw new RuntimeException(e);
+      }
+    }
+    return
+        (notNullConstraint != null && !notNullConstraint.getNotNullConstraints().isEmpty()) ||
+        (defaultConstraint != null && !defaultConstraint.getDefaultConstraints().isEmpty());
+  }
+
+  protected abstract AbstractAlterTableDesc createDesc(ASTNode command, TableName tableName,
+      Map<String, String> partitionSpec, Map<String, String> properties, boolean isToTxn,
+      boolean isExplicitStatsUpdate, EnvironmentContext environmentContext) throws SemanticException;
+
+  protected abstract boolean isView();
+}
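Note: validate() only lets the two writable statistics keys through an explicit statistics update. Assuming StatsSetupConst.ROW_COUNT and StatsSetupConst.RAW_DATA_SIZE resolve to "numRows" and "rawDataSize", a fragment illustrating what passes and what fails:

    Map<String, String> props = new HashMap<>();
    props.put(StatsSetupConst.ROW_COUNT, "1000");     // "numRows": accepted, must parse as a long
    props.put(StatsSetupConst.RAW_DATA_SIZE, "5000"); // "rawDataSize": accepted, changeStats becomes true
    // props.put(StatsSetupConst.ROW_COUNT, "many") would throw a SemanticException (non-numeric value);
    // any other key throws when the command type is ALTERTABLE_UPDATETABLESTATS or ALTERTABLE_UPDATEPARTSTATS.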
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTableSetPropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTableSetPropertiesAnalyzer.java
new file mode 100644
index 0000000000..c36b486554
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTableSetPropertiesAnalyzer.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for setting the properties of table like entities commands.
+ */
+public abstract class AbstractAlterTableSetPropertiesAnalyzer extends AbstractAlterTablePropertiesAnalyzer {
+  public AbstractAlterTableSetPropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  protected AbstractAlterTableDesc createDesc(ASTNode command, TableName tableName,
+      Map<String, String> partitionSpec, Map<String, String> properties, boolean isToTxn,
+      boolean isExplicitStatsUpdate, EnvironmentContext environmentContext) throws SemanticException {
+    addPropertyReadEntry(properties, inputs);
+    boolean isAcidConversion = isToTxn && AcidUtils.isFullAcidTable(properties)
+        && !AcidUtils.isFullAcidTable(getTable(tableName, true));
+    return new AlterTableSetPropertiesDesc(tableName, partitionSpec, null, isView(), properties,
+        isExplicitStatsUpdate, isAcidConversion, environmentContext);
+  }
+
+  protected abstract boolean isView();
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTableUnsetPropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTableUnsetPropertiesAnalyzer.java
new file mode 100644
index 0000000000..96fdf0c670
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AbstractAlterTableUnsetPropertiesAnalyzer.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for unsetting the properties of table like entities commands.
+ */
+public abstract class AbstractAlterTableUnsetPropertiesAnalyzer extends AbstractAlterTablePropertiesAnalyzer {
+  public AbstractAlterTableUnsetPropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  protected AbstractAlterTableDesc createDesc(ASTNode command, TableName tableName,
+      Map<String, String> partitionSpec, Map<String, String> properties, boolean isToTxn,
+      boolean isExplicitStatsUpdate, EnvironmentContext environmentContext) throws SemanticException {
+    boolean dropIfExists = command.getChild(1) != null;
+    // validate Unset Non Existed Table Properties
+    if (!dropIfExists) {
+      Table table = getTable(tableName, true);
+      Map<String, String> tableParams = table.getTTable().getParameters();
+      for (String key : properties.keySet()) {
+        if (!tableParams.containsKey(key)) {
+          String errorMsg = "The following property " + key + " does not exist in " + table.getTableName();
+          throw new SemanticException(ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg));
+        }
+      }
+    }
+
+    return new AlterTableUnsetPropertiesDesc(tableName, partitionSpec, null, isView(), properties,
+        isExplicitStatsUpdate, environmentContext);
+  }
+
+  protected abstract boolean isView();
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesAnalyzer.java
new file mode 100644
index 0000000000..15b8bfc412
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesAnalyzer.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for alter table set properties commands.
+ */
+@DDLType(types = {HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveParser.TOK_ALTERTABLE_UPDATESTATS,
+    HiveParser.TOK_ALTERPARTITION_UPDATESTATS})
+public class AlterTableSetPropertiesAnalyzer extends AbstractAlterTableSetPropertiesAnalyzer {
+
+  public AlterTableSetPropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean isView() {
+    return false;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesDesc.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesDesc.java
index 2d615a64cc..0c2099d39a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
 
 import java.util.Map;
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesOperation.java
similarity index 99%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesOperation.java
index ff6b08b5d5..fb29d07054 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableSetPropertiesOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
 
 import java.util.ArrayList;
 import java.util.List;
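Note: the set and unset analyzers above map to command shapes like the following (illustrative Hive DDL, shown as comments):

    // ALTER TABLE t SET TBLPROPERTIES ('transactional'='true');
    //   -> isToTxn == true: an exclusive write type is forced and the DDLWork is marked as needing a lock
    // ALTER TABLE t UNSET TBLPROPERTIES IF EXISTS ('comment');
    //   -> dropIfExists == true (command.getChild(1) is the IF EXISTS node), so missing keys are tolerated
    // ALTER TABLE t UNSET TBLPROPERTIES ('no_such_key');
    //   -> SemanticException ALTER_TBL_UNSET_NON_EXIST_PROPERTY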
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesAnalyzer.java
new file mode 100644
index 0000000000..b7f587a0f5
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesAnalyzer.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for alter table unset properties commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERTABLE_DROPPROPERTIES)
+public class AlterTableUnsetPropertiesAnalyzer extends AbstractAlterTableUnsetPropertiesAnalyzer {
+
+  public AlterTableUnsetPropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean isView() {
+    return false;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesDesc.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesDesc.java
index bea9a365e9..a23e1a6e58 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
 
 import java.util.Map;
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesOperation.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesOperation.java
index 31a27fc8d2..e70b45127f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/properties/AlterTableUnsetPropertiesOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.properties;
 
 import java.util.Set;
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AbstractAlterTableRenameAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AbstractAlterTableRenameAnalyzer.java
new file mode 100644
index 0000000000..eb2280f3e5
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AbstractAlterTableRenameAnalyzer.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.rename;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for rename table like entities commands.
+ */
+public abstract class AbstractAlterTableRenameAnalyzer extends AbstractAlterTableAnalyzer {
+  public AbstractAlterTableRenameAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    TableName target = getQualifiedTableName((ASTNode) command.getChild(0));
+
+    AlterTableRenameDesc desc = new AlterTableRenameDesc(tableName, null, isView(), target.getNotEmptyDbTable());
+    Table table = getTable(tableName.getNotEmptyDbTable(), true);
+    if (AcidUtils.isTransactionalTable(table)) {
+      setAcidDdlDesc(desc);
+    }
+    addInputsOutputsAlterTable(tableName, null, desc, desc.getType(), false);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+  }
+
+  protected abstract boolean isView();
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameAnalyzer.java
new file mode 100644
index 0000000000..cfba48b86b
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameAnalyzer.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.rename;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for rename table commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERTABLE_RENAME)
+public class AlterTableRenameAnalyzer extends AbstractAlterTableRenameAnalyzer {
+  public AlterTableRenameAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  protected boolean isView() {
+    return false;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameDesc.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameDesc.java
index 091c146940..5c1ce893a5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.rename;
 
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameOperation.java
similarity index 88%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameOperation.java
index 73ea400dcc..f06776c19b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/rename/AlterTableRenameOperation.java
@@ -16,11 +16,12 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.rename;
 
+import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.HiveTableName;
@@ -37,8 +37,8 @@ public AlterTableRenameOperation(DDLOperationContext context, AlterTableRenameDe
 
   @Override
   public int execute() throws HiveException {
-    String[] names = Utilities.getDbTableName(desc.getDbTableName());
-    if (Utils.isBootstrapDumpInProgress(context.getDb(), names[0])) {
+    TableName tableName = HiveTableName.of(desc.getDbTableName());
+    if (Utils.isBootstrapDumpInProgress(context.getDb(), tableName.getDb())) {
       LOG.error("DDLTask: Rename Table not allowed as bootstrap dump in progress");
       throw new HiveException("Rename Table: Not allowed as bootstrap dump in progress");
     }
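Note: AlterTableRenameOperation now derives the database through the TableName abstraction instead of the String[] returned by Utilities.getDbTableName. Roughly, assuming a db-qualified name is passed (the value here is hypothetical):

    TableName name = HiveTableName.of("sales.orders");
    name.getDb();    // "sales"
    name.getTable(); // "orders"

The HiveTableName import was missing from the shown hunks of this rename diff and has been added above; the hunk header was adjusted accordingly.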
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchAnalyzer.java
new file mode 100644
index 0000000000..61f8f1dd25
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchAnalyzer.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.misc.touch;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for touch commands.
+ *
+ * Rewrite the metadata for one or more partitions in a table. Useful when an external process modifies files on HDFS
+ * and you want the pre/post hooks to be fired for the specified partition.
+ */
+@DDLType(types = HiveParser.TOK_ALTERTABLE_TOUCH)
+public class AlterTableTouchAnalyzer extends AbstractAlterTableAnalyzer {
+  public AlterTableTouchAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework, ASTNode command)
+      throws SemanticException {
+    Table table = getTable(tableName);
+    validateAlterTableType(table, AlterTableType.TOUCH, false);
+    inputs.add(new ReadEntity(table));
+
+    List<Map<String, String>> partitionSpecs = getPartitionSpecs(table, command);
+
+    if (partitionSpecs.isEmpty()) {
+      AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), null);
+      outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
+      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+    } else {
+      PartitionUtils.addTablePartsOutputs(db, outputs, table, partitionSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK);
+      for (Map<String, String> partitionSpec : partitionSpecs) {
+        AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), partitionSpec);
+        rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+      }
+    }
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableTouchDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchDesc.java
similarity index 96%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableTouchDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchDesc.java
index 207f5b443f..243656afc7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableTouchDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.touch;
 
 import java.util.Map;
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableTouchOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchOperation.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableTouchOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchOperation.java
index 8d8ac20350..a58bc5a8fe 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableTouchOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/touch/AlterTableTouchOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.touch;
 
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
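Note: illustrative syntax for the command handled by AlterTableTouchAnalyzer; an unpartitioned TOUCH produces a single DDL task, while each matched partition spec gets its own task:

    // ALTER TABLE page_view TOUCH;
    // ALTER TABLE page_view TOUCH PARTITION (ds='2008-08-08', country='us');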
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.misc.truncate; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.table.AbstractBaseAlterTableAnalyzer; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableUtils; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.exec.ArchiveUtils; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.io.RCFileInputFormat; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.HiveTableName; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.BasicStatsWork; +import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; +import org.apache.hadoop.hive.ql.plan.LoadTableDesc; +import org.apache.hadoop.hive.ql.plan.MoveWork; +import org.apache.hadoop.hive.ql.plan.StatsWork; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.mapred.InputFormat; + +/** + * Analyzer for truncate table commands. 
+ */
+@DDLType(types = HiveParser.TOK_TRUNCATETABLE)
+public class TruncateTableAnalyzer extends AbstractBaseAlterTableAnalyzer {
+  public TruncateTableAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  public void analyzeInternal(ASTNode root) throws SemanticException {
+    ASTNode tableNode = (ASTNode) root.getChild(0); // TOK_TABLE_PARTITION
+    String tableNameString = getUnescapedName((ASTNode) tableNode.getChild(0));
+    Table table = getTable(tableNameString, true);
+    TableName tableName = HiveTableName.of(table);
+    checkTruncateEligibility(root, tableNode, tableNameString, table);
+
+    Map<String, String> partitionSpec = getPartSpec((ASTNode) tableNode.getChild(1));
+    addTruncateTableOutputs(tableNode, table, partitionSpec);
+
+    Task<?> truncateTask = null;
+    ASTNode colNamesNode = (ASTNode) root.getFirstChildWithType(HiveParser.TOK_TABCOLNAME);
+    if (colNamesNode == null) {
+      truncateTask = getTruncateTaskWithoutColumnNames(tableName, partitionSpec, table);
+    } else {
+      truncateTask = getTruncateTaskWithColumnNames(tableNode, tableName, table, partitionSpec, colNamesNode);
+    }
+
+    rootTasks.add(truncateTask);
+  }
+
+  private void checkTruncateEligibility(ASTNode ast, ASTNode root, String tableName, Table table)
+      throws SemanticException {
+    boolean isForce = ast.getFirstChildWithType(HiveParser.TOK_FORCE) != null;
+    if (!isForce) {
+      if (table.getTableType() != TableType.MANAGED_TABLE &&
+          (table.getParameters().getOrDefault(MetaStoreUtils.EXTERNAL_TABLE_PURGE, "FALSE")).equalsIgnoreCase("FALSE")) {
+        throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName));
+      }
+    }
+
+    if (table.isNonNative()) {
+      throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName)); //TODO
+    }
+
+    if (!table.isPartitioned() && root.getChildCount() > 1) {
+      throw new SemanticException(ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE.format(tableName));
+    }
+  }
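A note on the eligibility rule above, since the constants hide it a little: without FORCE, truncation is permitted only for managed tables, or for external tables whose external.table.purge parameter is anything other than "false". The sketch below restates that check as standalone Java; the method name and the string literals standing in for TableType.MANAGED_TABLE and MetaStoreUtils.EXTERNAL_TABLE_PURGE are illustrative, not part of the patch.

import java.util.Collections;
import java.util.Map;

public class TruncateEligibilitySketch {
  // Mirrors checkTruncateEligibility: FORCE bypasses the check; otherwise the
  // table must be managed, or external with external.table.purge != "false".
  static boolean isTruncateAllowed(String tableType, Map<String, String> params, boolean isForce) {
    if (isForce) {
      return true;
    }
    return "MANAGED_TABLE".equals(tableType)
        || !params.getOrDefault("external.table.purge", "FALSE").equalsIgnoreCase("FALSE");
  }

  public static void main(String[] args) {
    System.out.println(isTruncateAllowed("MANAGED_TABLE", Collections.emptyMap(), false));  // true
    System.out.println(isTruncateAllowed("EXTERNAL_TABLE", Collections.emptyMap(), false)); // false
    System.out.println(isTruncateAllowed("EXTERNAL_TABLE",
        Collections.singletonMap("external.table.purge", "true"), false));                  // true
  }
}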
+
+  private void addTruncateTableOutputs(ASTNode root, Table table, Map<String, String> partitionSpec)
+      throws SemanticException {
+    if (partitionSpec == null) {
+      if (!table.isPartitioned()) {
+        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE));
+      } else {
+        for (Partition partition : PartitionUtils.getPartitions(db, table, null, false)) {
+          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
+        }
+      }
+    } else {
+      if (AlterTableUtils.isFullPartitionSpec(table, partitionSpec)) {
+        validatePartSpec(table, partitionSpec, (ASTNode) root.getChild(1), conf, true);
+        Partition partition = PartitionUtils.getPartition(db, table, partitionSpec, true);
+        outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
+      } else {
+        validatePartSpec(table, partitionSpec, (ASTNode) root.getChild(1), conf, false);
+        for (Partition partition : PartitionUtils.getPartitions(db, table, partitionSpec, false)) {
+          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
+        }
+      }
+    }
+  }
+
+  private Task<?> getTruncateTaskWithoutColumnNames(TableName tableName, Map<String, String> partitionSpec,
+      Table table) {
+    TruncateTableDesc desc = new TruncateTableDesc(tableName, partitionSpec, null, table);
+    if (desc.mayNeedWriteId()) {
+      setAcidDdlDesc(desc);
+    }
+
+    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), desc);
+    return TaskFactory.get(ddlWork);
+  }
+
+  private Task<?> getTruncateTaskWithColumnNames(ASTNode root, TableName tableName, Table table,
+      Map<String, String> partitionSpec, ASTNode columnNamesNode) throws SemanticException {
+    try {
+      // It would be possible to support this, but this is such a pointless command.
+      if (AcidUtils.isInsertOnlyTable(table.getParameters())) {
+        throw new SemanticException("Truncating MM table columns not presently supported");
+      }
+
+      List<String> columnNames = getColumnNames(columnNamesNode);
+
+      if (table.isPartitioned()) {
+        return truncatePartitionedTableWithColumnNames(root, tableName, table, partitionSpec, columnNames);
+      } else {
+        return truncateUnpartitionedTableWithColumnNames(root, tableName, table, partitionSpec, columnNames);
+      }
+    } catch (HiveException e) {
+      throw new SemanticException(e);
+    }
+  }
+
+  @SuppressWarnings("rawtypes")
+  private Task<?> truncatePartitionedTableWithColumnNames(ASTNode root, TableName tableName, Table table,
+      Map<String, String> partitionSpec, List<String> columnNames) throws HiveException, SemanticException {
+    Partition partition = db.getPartition(table, partitionSpec, false);
+
+    Path tablePath = table.getPath();
+    Path partitionPath = partition.getDataLocation();
+
+    // if the table is in a different dfs than the partition, replace the partition's dfs with the table's dfs.
+    Path oldPartitionLocation = partitionPath;
+    Path newPartitionLocation = new Path(tablePath.toUri().getScheme(), tablePath.toUri().getAuthority(),
+        partitionPath.toUri().getPath());
+
+    List<FieldSchema> columns = partition.getCols();
+    List<String> bucketColumns = partition.getBucketCols();
+    List<String> listBucketColumns = partition.getSkewedColNames();
+
+    Class<? extends InputFormat> inputFormatClass = partition.getInputFormatClass();
+    boolean isArchived = ArchiveUtils.isArchived(partition);
+    ListBucketingCtx lbCtx = constructListBucketingCtx(partition.getSkewedColNames(), partition.getSkewedColValues(),
+        partition.getSkewedColValueLocationMaps(), partition.isStoredAsSubDirectories());
+    boolean isListBucketed = partition.isStoredAsSubDirectories();
+
+    return createTasks(root, tableName, table, partitionSpec, columnNames, bucketColumns, inputFormatClass,
+        isArchived, newPartitionLocation, oldPartitionLocation, columns, lbCtx, isListBucketed, listBucketColumns);
+  }
+
+  @SuppressWarnings("rawtypes")
+  private Task<?> truncateUnpartitionedTableWithColumnNames(ASTNode root, TableName tableName, Table table,
+      Map<String, String> partitionSpec, List<String> columnNames) throws SemanticException {
+    // input and output are the same
+    Path oldPartitionLocation = table.getPath();
+    Path newPartitionLocation = table.getPath();
+
+    List<FieldSchema> columns = table.getCols();
+    List<String> bucketColumns = table.getBucketCols();
+    List<String> listBucketColumns = table.getSkewedColNames();
+
+    Class<? extends InputFormat> inputFormatClass = table.getInputFormatClass();
+    ListBucketingCtx lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(),
+        table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories());
+    boolean isListBucketed = table.isStoredAsSubDirectories();
+
+    return createTasks(root, tableName, table, partitionSpec, columnNames, bucketColumns, inputFormatClass,
+        false, newPartitionLocation, oldPartitionLocation, columns, lbCtx, isListBucketed, listBucketColumns);
+  }
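The scheme/authority rewrite in truncatePartitionedTableWithColumnNames above deserves a second look: the partition keeps its relative path, but the filesystem it is written back to is always the table's. A minimal, runnable illustration with hadoop-common's Path class (the URIs are made up):

import org.apache.hadoop.fs.Path;

public class PartitionPathRewriteSketch {
  public static void main(String[] args) {
    Path tablePath = new Path("hdfs://nn-new/warehouse/t");
    Path partitionPath = new Path("hdfs://nn-old/warehouse/t/ds=2020-01-01");

    // Same construction as in the analyzer: scheme and authority come from the
    // table location, the path component from the partition location.
    Path rewritten = new Path(tablePath.toUri().getScheme(), tablePath.toUri().getAuthority(),
        partitionPath.toUri().getPath());

    System.out.println(rewritten); // hdfs://nn-new/warehouse/t/ds=2020-01-01
  }
}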
+
+  @SuppressWarnings("rawtypes")
+  private Task<?> createTasks(ASTNode root, TableName tableName, Table table, Map<String, String> partitionSpec,
+      List<String> columnNames, List<String> bucketColumns, Class<? extends InputFormat> inputFormatClass,
+      boolean isArchived, Path newPartitionLocation, Path oldPartitionLocation, List<FieldSchema> columns,
+      ListBucketingCtx lbCtx, boolean isListBucketed, List<String> listBucketColumns) throws SemanticException {
+    if (!inputFormatClass.equals(RCFileInputFormat.class)) {
+      throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
+    }
+
+    if (isArchived) {
+      throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg());
+    }
+
+    Set<Integer> columnIndexes =
+        getColumnIndexes(columnNames, bucketColumns, columns, isListBucketed, listBucketColumns);
+
+    addInputsOutputsAlterTable(tableName, partitionSpec, null, AlterTableType.TRUNCATE, false);
+
+    TableDesc tableDesc = Utilities.getTableDesc(table);
+    Path queryTmpdir = ctx.getExternalTmpPath(newPartitionLocation);
+
+    Task<?> truncateTask =
+        createTruncateTask(tableName, table, partitionSpec, oldPartitionLocation, lbCtx, columnIndexes, queryTmpdir);
+
+    addMoveTask(root, table, partitionSpec, oldPartitionLocation, newPartitionLocation, lbCtx, queryTmpdir,
+        truncateTask, tableDesc);
+
+    return truncateTask;
+  }
+
+  private Set<Integer> getColumnIndexes(List<String> columnNames, List<String> bucketColumns,
+      List<FieldSchema> columns, boolean isListBucketed, List<String> listBucketColumns) throws SemanticException {
+    Set<Integer> columnIndexes = new HashSet<>();
+    for (String columnName : columnNames) {
+      boolean found = false;
+      for (int columnIndex = 0; columnIndex < columns.size(); columnIndex++) {
+        if (columnName.equalsIgnoreCase(columns.get(columnIndex).getName())) {
+          columnIndexes.add(columnIndex);
+          found = true;
+          break;
+        }
+      }
+
+      if (!found) {
+        throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName));
+      }
+
+      for (String bucketColumn : bucketColumns) {
+        if (bucketColumn.equalsIgnoreCase(columnName)) {
+          throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName));
+        }
+      }
+
+      if (isListBucketed) {
+        for (String listBucketColumn : listBucketColumns) {
+          if (listBucketColumn.equalsIgnoreCase(columnName)) {
+            throw new SemanticException(ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName));
+          }
+        }
+      }
+    }
+    return columnIndexes;
+  }
+
+  private Task<?> createTruncateTask(TableName tableName, Table table, Map<String, String> partitionSpec,
+      Path oldPartitionLocation, ListBucketingCtx lbCtx, Set<Integer> columnIndexes, Path queryTmpdir) {
+    TruncateTableDesc desc = new TruncateTableDesc(tableName, partitionSpec, null, table,
+        new ArrayList<>(columnIndexes), oldPartitionLocation, queryTmpdir, lbCtx);
+    if (desc.mayNeedWriteId()) {
+      setAcidDdlDesc(desc);
+    }
+
+    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), desc);
+    ddlWork.setNeedLock(true);
+    return TaskFactory.get(ddlWork);
+  }
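getColumnIndexes above is the validation gate for TRUNCATE ... COLUMNS: every requested name must resolve (case-insensitively) to a schema position and must not be a bucketing or list-bucketing column. A self-contained restatement over plain string lists instead of FieldSchema, illustrative only:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ColumnIndexSketch {
  // Resolves each requested column to its schema position, rejecting unknown
  // names and names the table is bucketed on.
  static Set<Integer> resolve(List<String> requested, List<String> schema, List<String> bucketCols) {
    Set<Integer> indexes = new HashSet<>();
    for (String name : requested) {
      int found = -1;
      for (int i = 0; i < schema.size() && found < 0; i++) {
        if (name.equalsIgnoreCase(schema.get(i))) {
          found = i;
        }
      }
      if (found < 0) {
        throw new IllegalArgumentException("Invalid column: " + name);
      }
      for (String bucketCol : bucketCols) {
        if (bucketCol.equalsIgnoreCase(name)) {
          throw new IllegalArgumentException("Cannot truncate bucketing column: " + name);
        }
      }
      indexes.add(found);
    }
    return indexes;
  }

  public static void main(String[] args) {
    // Names resolve case-insensitively; "B" maps to index 1.
    System.out.println(resolve(Arrays.asList("B"), Arrays.asList("a", "b", "c"), Arrays.asList("c"))); // [1]
  }
}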
+
+  private void addMoveTask(ASTNode root, Table table, Map<String, String> partitionSpec, Path oldPartitionLocation,
+      Path newPartitionLocation, ListBucketingCtx lbCtx, Path queryTmpdir, Task<?> truncateTask, TableDesc tableDesc)
+      throws SemanticException {
+    // Write the output to temporary directory and move it to the final location at the end
+    // so the operation is atomic.
+    LoadTableDesc loadTableDesc =
+        new LoadTableDesc(queryTmpdir, tableDesc, partitionSpec == null ? new HashMap<>() : partitionSpec);
+    loadTableDesc.setLbCtx(lbCtx);
+    Task<?> moveTask = TaskFactory.get(new MoveWork(null, null, loadTableDesc, null, false));
+    truncateTask.addDependentTask(moveTask);
+
+    addStatTask(root, table, oldPartitionLocation, newPartitionLocation, loadTableDesc, moveTask);
+  }
+
+  private void addStatTask(ASTNode root, Table table, Path oldPartitionLocation, Path newPartitionLocation,
+      LoadTableDesc loadTableDesc, Task<?> moveTask) throws SemanticException {
+    // Recalculate the HDFS stats if auto gather stats is set
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+      BasicStatsWork basicStatsWork;
+      if (oldPartitionLocation.equals(newPartitionLocation)) {
+        // If we're merging to the same location, we can avoid some metastore calls
+        TableSpec partitionSpec = new TableSpec(db, conf, root);
+        basicStatsWork = new BasicStatsWork(partitionSpec);
+      } else {
+        basicStatsWork = new BasicStatsWork(loadTableDesc);
+      }
+      basicStatsWork.setNoStatsAggregator(true);
+      basicStatsWork.setClearAggregatorStats(true);
+      StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, conf);
+
+      Task<?> statTask = TaskFactory.get(columnStatsWork);
+      moveTask.addDependentTask(statTask);
+    }
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableDesc.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableDesc.java
index 04fa1b9901..6bac319182 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.truncate;
 
 import java.io.Serializable;
 import java.util.List;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableOperation.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableOperation.java
index 3c62b0a1fa..e738d4f1c3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/truncate/TruncateTableOperation.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
-package org.apache.hadoop.hive.ql.ddl.table.misc;
+package org.apache.hadoop.hive.ql.ddl.table.misc.truncate;
 
 import java.util.ArrayList;
 import java.util.Map;
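The move task wired up in addMoveTask exists purely for atomicity: the rewritten column data lands in a scratch directory and is only swapped into the final location once complete, so readers never see a half-truncated partition. The same write-then-rename pattern in miniature with java.nio, as an analogy only; Hive does this through MoveWork on the warehouse filesystem:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class WriteThenMoveSketch {
  public static void main(String[] args) throws IOException {
    Path target = Files.createTempFile("partition", ".data");

    // 1. Do the expensive rewrite in a scratch location.
    Path scratch = Files.createTempFile("partition", ".tmp");
    Files.write(scratch, "rewritten contents".getBytes(StandardCharsets.UTF_8));

    // 2. Publish with a single rename, so readers observe either the old
    //    contents or the new ones, never a partial write.
    Files.move(scratch, target, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);

    System.out.println(new String(Files.readAllBytes(target), StandardCharsets.UTF_8));
  }
}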
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/properties/AlterViewSetPropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/properties/AlterViewSetPropertiesAnalyzer.java
new file mode 100644
index 0000000000..b3646f715a
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/properties/AlterViewSetPropertiesAnalyzer.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.view.properties;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AbstractAlterTableSetPropertiesAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for alter view set properties commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERVIEW_PROPERTIES)
+public class AlterViewSetPropertiesAnalyzer extends AbstractAlterTableSetPropertiesAnalyzer {
+
+  public AlterViewSetPropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean isView() {
+    return true;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/properties/AlterViewUnsetPropertiesAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/properties/AlterViewUnsetPropertiesAnalyzer.java
new file mode 100644
index 0000000000..dd4dcc3944
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/properties/AlterViewUnsetPropertiesAnalyzer.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.view.properties;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AbstractAlterTableUnsetPropertiesAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for alter view unset properties commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERVIEW_DROPPROPERTIES)
+public class AlterViewUnsetPropertiesAnalyzer extends AbstractAlterTableUnsetPropertiesAnalyzer {
+
+  public AlterViewUnsetPropertiesAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean isView() {
+    return true;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/rename/AlterViewRenameAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/rename/AlterViewRenameAnalyzer.java
new file mode 100644
index 0000000000..53da7c29d9
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/rename/AlterViewRenameAnalyzer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.view.rename;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.misc.rename.AbstractAlterTableRenameAnalyzer;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for rename view commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERVIEW_RENAME)
+public class AlterViewRenameAnalyzer extends AbstractAlterTableRenameAnalyzer {
+  public AlterViewRenameAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean isView() {
+    return true;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
index d7b6eeae55..7e844d3164 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
 import org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc;
 import org.apache.hadoop.hive.ql.ddl.misc.flags.ReplRemoveFirstIncLoadPendFlagDesc;
-import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetPropertiesDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index fc7f226d77..504c9d4048 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
-import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetPropertiesDesc;
 import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
index cc3c5da8ce..a71912a639 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
 import org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.ddl.table.drop.DropTableDesc;
-import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetPropertiesDesc;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 46bb37a0c2..4f1e23d7a6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -97,8 +97,13 @@
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
 import org.apache.hadoop.security.alias.CredentialProvider;
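The hunk that follows inlines what used to be DDLSemanticAnalyzer.getTypeName: plain types come from a token-to-name map, while parameterized types take their qualified name from the corresponding TypeInfo. What those qualified names look like, sketched under the assumption that hive-serde's TypeInfoFactory accessors are on the classpath:

import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;

public class TypeNameSketch {
  public static void main(String[] args) {
    VarcharTypeInfo varchar = TypeInfoFactory.getVarcharTypeInfo(10);
    DecimalTypeInfo decimal = TypeInfoFactory.getDecimalTypeInfo(10, 2);

    // The qualified name carries the type parameters, which is what
    // getTypeName returns for TOK_VARCHAR and TOK_DECIMAL tokens.
    System.out.println(varchar.getQualifiedName()); // varchar(10)
    System.out.println(decimal.getQualifiedName()); // decimal(10,2)
  }
}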
@@ -876,8 +881,64 @@ public static String getTypeStringFromAST(ASTNode typeNode)
     case HiveParser.TOK_UNIONTYPE:
       return getUnionTypeStringFromAST(typeNode);
     default:
-      return DDLSemanticAnalyzer.getTypeName(typeNode);
+      return getTypeName(typeNode);
+    }
+  }
+
+  private static final Map<Integer, String> TOKEN_TO_TYPE = new HashMap<>();
+
+  static {
+    TOKEN_TO_TYPE.put(HiveParser.TOK_BOOLEAN, serdeConstants.BOOLEAN_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_TINYINT, serdeConstants.TINYINT_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_SMALLINT, serdeConstants.SMALLINT_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_INT, serdeConstants.INT_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_BIGINT, serdeConstants.BIGINT_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_FLOAT, serdeConstants.FLOAT_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_DOUBLE, serdeConstants.DOUBLE_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_STRING, serdeConstants.STRING_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_CHAR, serdeConstants.CHAR_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_VARCHAR, serdeConstants.VARCHAR_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_BINARY, serdeConstants.BINARY_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_DATE, serdeConstants.DATE_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_DATETIME, serdeConstants.DATETIME_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_TIMESTAMPLOCALTZ, serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_INTERVAL_YEAR_MONTH, serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_INTERVAL_DAY_TIME, serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME);
+    TOKEN_TO_TYPE.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME);
+  }
+
+  private static String getTypeName(ASTNode node) throws SemanticException {
+    int token = node.getType();
+    String typeName;
+
+    // datetime type isn't currently supported
+    if (token == HiveParser.TOK_DATETIME) {
+      throw new SemanticException(ErrorMsg.UNSUPPORTED_TYPE.getMsg());
+    }
+
+    switch (token) {
+    case HiveParser.TOK_CHAR:
+      CharTypeInfo charTypeInfo = ParseUtils.getCharTypeInfo(node);
+      typeName = charTypeInfo.getQualifiedName();
+      break;
+    case HiveParser.TOK_VARCHAR:
+      VarcharTypeInfo varcharTypeInfo = ParseUtils.getVarcharTypeInfo(node);
+      typeName = varcharTypeInfo.getQualifiedName();
+      break;
+    case HiveParser.TOK_TIMESTAMPLOCALTZ:
+      TimestampLocalTZTypeInfo timestampLocalTZTypeInfo =
+          TypeInfoFactory.getTimestampTZTypeInfo(null);
+      typeName = timestampLocalTZTypeInfo.getQualifiedName();
+      break;
+    case HiveParser.TOK_DECIMAL:
+      DecimalTypeInfo decTypeInfo = ParseUtils.getDecimalTypeTypeInfo(node);
+      typeName = decTypeInfo.getQualifiedName();
+      break;
+    default:
+      typeName = TOKEN_TO_TYPE.get(token);
+    }
+    return typeName;
+  }
+
+  private
static String getStructTypeStringFromAST(ASTNode typeNode) diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java deleted file mode 100644 index ba019c7553..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ /dev/null @@ -1,818 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.parse; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -import org.antlr.runtime.tree.CommonTree; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.TableName; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId; -import org.apache.hadoop.hive.ql.ddl.DDLWork; -import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; -import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; -import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableRenameDesc; -import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetOwnerDesc; -import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc; -import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableTouchDesc; -import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableUnsetPropertiesDesc; -import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; -import org.apache.hadoop.hive.ql.exec.ArchiveUtils; -import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.TaskFactory; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType; -import org.apache.hadoop.hive.ql.io.AcidUtils; -import org.apache.hadoop.hive.ql.io.RCFileInputFormat; -import 
org.apache.hadoop.hive.ql.metadata.DefaultConstraint; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils; -import org.apache.hadoop.hive.ql.plan.BasicStatsWork; -import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; -import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; -import org.apache.hadoop.hive.ql.plan.LoadTableDesc; -import org.apache.hadoop.hive.ql.plan.MoveWork; -import org.apache.hadoop.hive.ql.plan.StatsWork; -import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; -import org.apache.hadoop.mapred.InputFormat; - -/** - * DDLSemanticAnalyzer. - * - */ -public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { - private static final Map TokenToTypeName = new HashMap(); - - // Equivalent to acidSinks, but for DDL operations that change data. - private DDLDescWithWriteId ddlDescWithWriteId; - - static { - TokenToTypeName.put(HiveParser.TOK_BOOLEAN, serdeConstants.BOOLEAN_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_TINYINT, serdeConstants.TINYINT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_SMALLINT, serdeConstants.SMALLINT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_INT, serdeConstants.INT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_BIGINT, serdeConstants.BIGINT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_FLOAT, serdeConstants.FLOAT_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DOUBLE, serdeConstants.DOUBLE_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_STRING, serdeConstants.STRING_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_CHAR, serdeConstants.CHAR_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_VARCHAR, serdeConstants.VARCHAR_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_BINARY, serdeConstants.BINARY_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DATE, serdeConstants.DATE_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DATETIME, serdeConstants.DATETIME_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_TIMESTAMPLOCALTZ, serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_INTERVAL_YEAR_MONTH, serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_INTERVAL_DAY_TIME, serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME); - TokenToTypeName.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME); - } - - public static String getTypeName(ASTNode node) throws SemanticException { - int token = node.getType(); - String typeName; - - // datetime type isn't currently supported - if (token == HiveParser.TOK_DATETIME) { - throw new SemanticException(ErrorMsg.UNSUPPORTED_TYPE.getMsg()); - } - - switch (token) { - case HiveParser.TOK_CHAR: - CharTypeInfo charTypeInfo = ParseUtils.getCharTypeInfo(node); - typeName = charTypeInfo.getQualifiedName(); - break; - case HiveParser.TOK_VARCHAR: - 
VarcharTypeInfo varcharTypeInfo = ParseUtils.getVarcharTypeInfo(node); - typeName = varcharTypeInfo.getQualifiedName(); - break; - case HiveParser.TOK_TIMESTAMPLOCALTZ: - TimestampLocalTZTypeInfo timestampLocalTZTypeInfo = - TypeInfoFactory.getTimestampTZTypeInfo(null); - typeName = timestampLocalTZTypeInfo.getQualifiedName(); - break; - case HiveParser.TOK_DECIMAL: - DecimalTypeInfo decTypeInfo = ParseUtils.getDecimalTypeTypeInfo(node); - typeName = decTypeInfo.getQualifiedName(); - break; - default: - typeName = TokenToTypeName.get(token); - } - return typeName; - } - - public DDLSemanticAnalyzer(QueryState queryState) throws SemanticException { - this(queryState, createHiveDB(queryState.getConf())); - } - - public DDLSemanticAnalyzer(QueryState queryState, Hive db) throws SemanticException { - super(queryState, db); - } - - @Override - public void analyzeInternal(ASTNode input) throws SemanticException { - - ASTNode ast = input; - switch (ast.getType()) { - case HiveParser.TOK_ALTERTABLE: { - ast = (ASTNode) input.getChild(1); - final TableName tName = - getQualifiedTableName((ASTNode) input.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); - // TODO CAT - for now always use the default catalog. Eventually will want to see if - // the user specified a catalog - Map partSpec = null; - ASTNode partSpecNode = (ASTNode)input.getChild(2); - if (partSpecNode != null) { - // We can use alter table partition rename to convert/normalize the legacy partition - // column values. In so, we should not enable the validation to the old partition spec - // passed in this command. - if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { - partSpec = getPartSpec(partSpecNode); - } else { - partSpec = getValidatedPartSpec(getTable(tName), partSpecNode, conf, false); - } - } - - if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAME) { - analyzeAlterTableRename(tName, ast, false); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_TOUCH) { - analyzeAlterTableTouch(tName, ast); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) { - analyzeAlterTableProps(tName, null, ast, false, false); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) { - analyzeAlterTableProps(tName, null, ast, false, true); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UPDATESTATS || - ast.getType() == HiveParser.TOK_ALTERPARTITION_UPDATESTATS) { - analyzeAlterTableProps(tName, partSpec, ast, false, false); - } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS || - ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_UPDATECOLSTATS){ - analyzeAlterTableUpdateStats(ast, tName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_OWNER) { - analyzeAlterTableOwner(ast, tName); - } - break; - } - case HiveParser.TOK_TRUNCATETABLE: - analyzeTruncateTable(ast); - break; - case HiveParser.TOK_ALTERVIEW: { - final TableName tName = getQualifiedTableName((ASTNode) ast.getChild(0)); - ast = (ASTNode) ast.getChild(1); - if (ast.getType() == HiveParser.TOK_ALTERVIEW_PROPERTIES) { - analyzeAlterTableProps(tName, null, ast, true, false); - } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) { - analyzeAlterTableProps(tName, null, ast, true, true); - } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_RENAME) { - analyzeAlterTableRename(tName, ast, true); - } - break; - } - default: - throw new SemanticException("Unsupported command: " + ast); - } - if (fetchTask != null && !rootTasks.isEmpty()) { - 
rootTasks.get(rootTasks.size() - 1).setFetchSource(true); - } - } - - private void analyzeAlterTableUpdateStats(ASTNode ast, TableName tblName, Map partSpec) - throws SemanticException { - String colName = getUnescapedName((ASTNode) ast.getChild(0)); - Map mapProp = getProps((ASTNode) (ast.getChild(1)).getChild(0)); - - Table tbl = getTable(tblName); - String partName = null; - if (partSpec != null) { - try { - partName = Warehouse.makePartName(partSpec, false); - } catch (MetaException e) { - throw new SemanticException("partition " + partSpec.toString() - + " not found"); - } - } - - String colType = null; - List cols = tbl.getCols(); - for (FieldSchema col : cols) { - if (colName.equalsIgnoreCase(col.getName())) { - colType = col.getType(); - break; - } - } - - if (colType == null) { - throw new SemanticException("column type not found"); - } - - ColumnStatsUpdateWork columnStatsUpdateWork = - new ColumnStatsUpdateWork(partName, mapProp, tbl.getDbName(), tbl.getTableName(), colName, colType); - ColumnStatsUpdateTask cStatsUpdateTask = (ColumnStatsUpdateTask) TaskFactory - .get(columnStatsUpdateWork); - // TODO: doesn't look like this path is actually ever exercised. Maybe this needs to be removed. - addInputsOutputsAlterTable(tblName, partSpec, null, AlterTableType.UPDATESTATS, false); - if (AcidUtils.isTransactionalTable(tbl)) { - setAcidDdlDesc(columnStatsUpdateWork); - } - rootTasks.add(cStatsUpdateTask); - } - - private void analyzeTruncateTable(ASTNode ast) throws SemanticException { - ASTNode root = (ASTNode) ast.getChild(0); // TOK_TABLE_PARTITION - final String tableName = getUnescapedName((ASTNode) root.getChild(0)); - - Table table = getTable(tableName, true); - final TableName tName = HiveTableName.of(table); - checkTruncateEligibility(ast, root, tableName, table); - - Map partSpec = getPartSpec((ASTNode) root.getChild(1)); - addTruncateTableOutputs(root, table, partSpec); - - Task truncateTask = null; - - // Is this a truncate column command - ASTNode colNamesNode = (ASTNode) ast.getFirstChildWithType(HiveParser.TOK_TABCOLNAME); - if (colNamesNode == null) { - truncateTask = getTruncateTaskWithoutColumnNames(tName, partSpec, table); - } else { - truncateTask = getTruncateTaskWithColumnNames(root, tName, table, partSpec, colNamesNode); - } - - rootTasks.add(truncateTask); - } - - private void checkTruncateEligibility(ASTNode ast, ASTNode root, String tableName, Table table) - throws SemanticException { - boolean isForce = ast.getFirstChildWithType(HiveParser.TOK_FORCE) != null; - if (!isForce) { - if (table.getTableType() != TableType.MANAGED_TABLE && - (table.getParameters().getOrDefault(MetaStoreUtils.EXTERNAL_TABLE_PURGE, "FALSE")) - .equalsIgnoreCase("FALSE")) { - throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName)); - } - } - if (table.isNonNative()) { - throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName)); //TODO - } - if (!table.isPartitioned() && root.getChildCount() > 1) { - throw new SemanticException(ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE.format(tableName)); - } - } - - private void addTruncateTableOutputs(ASTNode root, Table table, Map partSpec) - throws SemanticException { - if (partSpec == null) { - if (!table.isPartitioned()) { - outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } else { - for (Partition partition : PartitionUtils.getPartitions(db, table, null, false)) { - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } - } - } 
else { - if (isFullSpec(table, partSpec)) { - validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, true); - Partition partition = PartitionUtils.getPartition(db, table, partSpec, true); - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } else { - validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, false); - for (Partition partition : PartitionUtils.getPartitions(db, table, partSpec, false)) { - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); - } - } - } - } - - private Task getTruncateTaskWithoutColumnNames(TableName tableName, Map partSpec, Table table) { - TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec, null, table); - if (truncateTblDesc.mayNeedWriteId()) { - setAcidDdlDesc(truncateTblDesc); - } - - DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc); - return TaskFactory.get(ddlWork); - } - - private Task getTruncateTaskWithColumnNames(ASTNode root, TableName tName, Table table, - Map partSpec, ASTNode colNamesNode) throws SemanticException { - try { - List columnNames = getColumnNames(colNamesNode); - - // It would be possible to support this, but this is such a pointless command. - if (AcidUtils.isInsertOnlyTable(table.getParameters())) { - throw new SemanticException("Truncating MM table columns not presently supported"); - } - - List bucketCols = null; - Class inputFormatClass = null; - boolean isArchived = false; - Path newTblPartLoc = null; - Path oldTblPartLoc = null; - List cols = null; - ListBucketingCtx lbCtx = null; - boolean isListBucketed = false; - List listBucketColNames = null; - - if (table.isPartitioned()) { - Partition part = db.getPartition(table, partSpec, false); - - Path tabPath = table.getPath(); - Path partPath = part.getDataLocation(); - - // if the table is in a different dfs than the partition, - // replace the partition's dfs with the table's dfs. - newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri() - .getAuthority(), partPath.toUri().getPath()); - - oldTblPartLoc = partPath; - - cols = part.getCols(); - bucketCols = part.getBucketCols(); - inputFormatClass = part.getInputFormatClass(); - isArchived = ArchiveUtils.isArchived(part); - lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), - part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories()); - isListBucketed = part.isStoredAsSubDirectories(); - listBucketColNames = part.getSkewedColNames(); - } else { - // input and output are the same - oldTblPartLoc = table.getPath(); - newTblPartLoc = table.getPath(); - cols = table.getCols(); - bucketCols = table.getBucketCols(); - inputFormatClass = table.getInputFormatClass(); - lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(), - table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories()); - isListBucketed = table.isStoredAsSubDirectories(); - listBucketColNames = table.getSkewedColNames(); - } - - // throw a HiveException for non-rcfile. 
- if (!inputFormatClass.equals(RCFileInputFormat.class)) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg()); - } - - // throw a HiveException if the table/partition is archived - if (isArchived) { - throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg()); - } - - Set columnIndexes = new HashSet(); - for (String columnName : columnNames) { - boolean found = false; - for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) { - if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) { - columnIndexes.add(columnIndex); - found = true; - break; - } - } - // Throw an exception if the user is trying to truncate a column which doesn't exist - if (!found) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName)); - } - // Throw an exception if the table/partition is bucketed on one of the columns - for (String bucketCol : bucketCols) { - if (bucketCol.equalsIgnoreCase(columnName)) { - throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName)); - } - } - if (isListBucketed) { - for (String listBucketCol : listBucketColNames) { - if (listBucketCol.equalsIgnoreCase(columnName)) { - throw new SemanticException( - ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName)); - } - } - } - } - - Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); - TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tName, partSpec, null, table, - new ArrayList(columnIndexes), oldTblPartLoc, queryTmpdir, lbCtx); - if (truncateTblDesc.mayNeedWriteId()) { - setAcidDdlDesc(truncateTblDesc); - } - - DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc); - Task truncateTask = TaskFactory.get(ddlWork); - - addInputsOutputsAlterTable(tName, partSpec, null, AlterTableType.TRUNCATE, false); - ddlWork.setNeedLock(true); - TableDesc tblDesc = Utilities.getTableDesc(table); - // Write the output to temporary directory and move it to the final location at the end - // so the operation is atomic. - LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, partSpec == null ? 
new HashMap<>() : partSpec); - ltd.setLbCtx(lbCtx); - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false)); - truncateTask.addDependentTask(moveTsk); - - // Recalculate the HDFS stats if auto gather stats is set - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - BasicStatsWork basicStatsWork; - if (oldTblPartLoc.equals(newTblPartLoc)) { - // If we're merging to the same location, we can avoid some metastore calls - TableSpec tablepart = new TableSpec(this.db, conf, root); - basicStatsWork = new BasicStatsWork(tablepart); - } else { - basicStatsWork = new BasicStatsWork(ltd); - } - basicStatsWork.setNoStatsAggregator(true); - basicStatsWork.setClearAggregatorStats(true); - StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, conf); - - Task statTask = TaskFactory.get(columnStatsWork); - moveTsk.addDependentTask(statTask); - } - - return truncateTask; - } catch (HiveException e) { - throw new SemanticException(e); - } - } - - public static boolean isFullSpec(Table table, Map partSpec) { - for (FieldSchema partCol : table.getPartCols()) { - if (partSpec.get(partCol.getName()) == null) { - return false; - } - } - return true; - } - - private void validateAlterTableType(Table tbl, AlterTableType op) throws SemanticException { - validateAlterTableType(tbl, op, false); - } - - private void validateAlterTableType(Table tbl, AlterTableType op, boolean expectView) - throws SemanticException { - if (tbl.isView()) { - if (!expectView) { - throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg()); - } - - switch (op) { - case ADDPARTITION: - case DROPPARTITION: - case RENAMEPARTITION: - case ADDPROPS: - case DROPPROPS: - case RENAME: - // allow this form - break; - default: - throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString())); - } - } else { - if (expectView) { - throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg()); - } - } - if (tbl.isNonNative() && !AlterTableType.NON_NATIVE_TABLE_ALLOWED.contains(op)) { - throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.format( - AlterTableType.NON_NATIVE_TABLE_ALLOWED.toString(), tbl.getTableName())); - } - } - - private boolean hasConstraintsEnabled(final String tblName) throws SemanticException{ - - NotNullConstraint nnc = null; - DefaultConstraint dc = null; - try { - // retrieve enabled NOT NULL constraint from metastore - nnc = Hive.get().getEnabledNotNullConstraints( - db.getDatabaseCurrent().getName(), tblName); - dc = Hive.get().getEnabledDefaultConstraints( - db.getDatabaseCurrent().getName(), tblName); - } catch (Exception e) { - if (e instanceof SemanticException) { - throw (SemanticException) e; - } else { - throw (new RuntimeException(e)); - } - } - if((nnc != null && !nnc.getNotNullConstraints().isEmpty()) - || (dc != null && !dc.getDefaultConstraints().isEmpty())) { - return true; - } - return false; - } - - private void analyzeAlterTableProps(TableName tableName, Map partSpec, ASTNode ast, - boolean expectView, boolean isUnset) throws SemanticException { - - Map mapProp = getProps((ASTNode) (ast.getChild(0)).getChild(0)); - EnvironmentContext environmentContext = null; - // we need to check if the properties are valid, especially for stats. - // they might be changed via alter table .. update statistics or - // alter table .. set tblproperties. 
If the property is not row_count - // or raw_data_size, it could not be changed through update statistics - boolean changeStatsSucceeded = false; - for (Entry entry : mapProp.entrySet()) { - // we make sure that we do not change anything if there is anything - // wrong. - if (entry.getKey().equals(StatsSetupConst.ROW_COUNT) - || entry.getKey().equals(StatsSetupConst.RAW_DATA_SIZE)) { - try { - Long.parseLong(entry.getValue()); - changeStatsSucceeded = true; - } catch (Exception e) { - throw new SemanticException("AlterTable " + entry.getKey() + " failed with value " - + entry.getValue()); - } - } - // if table is being modified to be external we need to make sure existing table - // doesn't have enabled constraint since constraints are disallowed with such tables - else if (entry.getKey().equals("external") && entry.getValue().equals("true")) { - if (hasConstraintsEnabled(tableName.getTable())) { - throw new SemanticException( - ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Table: " + tableName.getDbTable() + " has constraints enabled." - + "Please remove those constraints to change this property.")); - } - } - else { - if (queryState.getCommandType() - .equals(HiveOperation.ALTERTABLE_UPDATETABLESTATS.getOperationName()) - || queryState.getCommandType() - .equals(HiveOperation.ALTERTABLE_UPDATEPARTSTATS.getOperationName())) { - throw new SemanticException("AlterTable UpdateStats " + entry.getKey() - + " failed because the only valid keys are " + StatsSetupConst.ROW_COUNT + " and " - + StatsSetupConst.RAW_DATA_SIZE); - } - } - - if (changeStatsSucceeded) { - environmentContext = new EnvironmentContext(); - environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER); - } - } - boolean isToTxn = AcidUtils.isTablePropertyTransactional(mapProp) - || mapProp.containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES); - boolean isExplicitStatsUpdate = changeStatsSucceeded && AcidUtils.isTransactionalTable(getTable(tableName, true)); - AbstractAlterTableDesc alterTblDesc = null; - DDLWork ddlWork = null; - - if (isUnset) { - boolean dropIfExists = ast.getChild(1) != null; - // validate Unset Non Existed Table Properties - if (!dropIfExists) { - Table tab = getTable(tableName, true); - Map tableParams = tab.getTTable().getParameters(); - for (String currKey : mapProp.keySet()) { - if (!tableParams.containsKey(currKey)) { - String errorMsg = "The following property " + currKey + " does not exist in " + tab.getTableName(); - throw new SemanticException( - ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg)); - } - } - } - - alterTblDesc = new AlterTableUnsetPropertiesDesc(tableName, partSpec, null, expectView, mapProp, - isExplicitStatsUpdate, environmentContext); - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, alterTblDesc.getType(), isToTxn); - ddlWork = new DDLWork(getInputs(), getOutputs(), alterTblDesc); - } else { - addPropertyReadEntry(mapProp, inputs); - boolean isAcidConversion = isToTxn && AcidUtils.isFullAcidTable(mapProp) - && !AcidUtils.isFullAcidTable(getTable(tableName, true)); - alterTblDesc = new AlterTableSetPropertiesDesc(tableName, partSpec, null, expectView, mapProp, - isExplicitStatsUpdate, isAcidConversion, environmentContext); - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, alterTblDesc.getType(), isToTxn); - ddlWork = new DDLWork(getInputs(), getOutputs(), alterTblDesc); - } - if (isToTxn) { - ddlWork.setNeedLock(true); // Hmm... why don't many other operations here need locks? 
- } - if (isToTxn || isExplicitStatsUpdate) { - setAcidDdlDesc(alterTblDesc); - } - - rootTasks.add(TaskFactory.get(ddlWork)); - } - - private void setAcidDdlDesc(DDLDescWithWriteId descWithWriteId) { - if(this.ddlDescWithWriteId != null) { - throw new IllegalStateException("ddlDescWithWriteId is already set: " + this.ddlDescWithWriteId); - } - this.ddlDescWithWriteId = descWithWriteId; - } - - @Override - public DDLDescWithWriteId getAcidDdlDesc() { - return ddlDescWithWriteId; - } - - // For the time while all the alter table operations are getting migrated there is a duplication of this method here - private WriteType determineAlterTableWriteType(Table tab, AbstractAlterTableDesc desc, AlterTableType op) { - boolean convertingToAcid = false; - if (desc != null && desc.getProps() != null && - Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) { - convertingToAcid = true; - } - if(!AcidUtils.isTransactionalTable(tab) && convertingToAcid) { - //non-acid to transactional conversion (property itself) must be mutexed to prevent concurrent writes. - // See HIVE-16688 for use cases. - return WriteType.DDL_EXCLUSIVE; - } - return WriteEntity.determineAlterTableWriteType(op); - } - - private void addInputsOutputsAlterTable(TableName tableName, Map partSpec, - AbstractAlterTableDesc desc, AlterTableType op, boolean doForceExclusive) throws SemanticException { - boolean isCascade = desc != null && desc.isCascade(); - boolean alterPartitions = partSpec != null && !partSpec.isEmpty(); - //cascade only occurs at table level then cascade to partition level - if (isCascade && alterPartitions) { - throw new SemanticException( - ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName()); - } - - Table tab = getTable(tableName, true); - // cascade only occurs with partitioned table - if (isCascade && !tab.isPartitioned()) { - throw new SemanticException( - ErrorMsg.ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED); - } - - // Determine the lock type to acquire - WriteEntity.WriteType writeType = doForceExclusive - ? WriteType.DDL_EXCLUSIVE : determineAlterTableWriteType(tab, desc, op); - - if (!alterPartitions) { - inputs.add(new ReadEntity(tab)); - WriteEntity alterTableOutput = new WriteEntity(tab, writeType); - outputs.add(alterTableOutput); - //do not need the lock for partitions since they are covered by the table lock - if (isCascade) { - for (Partition part : PartitionUtils.getPartitions(db, tab, partSpec, false)) { - outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); - } - } - } else { - ReadEntity re = new ReadEntity(tab); - // In the case of altering a table for its partitions we don't need to lock the table - // itself, just the partitions. But the table will have a ReadEntity. So mark that - // ReadEntity as no lock. - re.noLockNeeded(); - inputs.add(re); - - if (isFullSpec(tab, partSpec)) { - // Fully specified partition spec - Partition part = PartitionUtils.getPartition(db, tab, partSpec, true); - outputs.add(new WriteEntity(part, writeType)); - } else { - // Partial partition spec supplied. Make sure this is allowed. 
- if (!AlterTableType.SUPPORT_PARTIAL_PARTITION_SPEC.contains(op)) { - throw new SemanticException( - ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName()); - } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) { - throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED); - } - - for (Partition part : PartitionUtils.getPartitions(db, tab, partSpec, true)) { - outputs.add(new WriteEntity(part, writeType)); - } - } - } - - if (desc != null) { - validateAlterTableType(tab, op, desc.expectView()); - } - } - - private void analyzeAlterTableOwner(ASTNode ast, TableName tableName) throws SemanticException { - PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) ast.getChild(0)); - - if (ownerPrincipal.getType() == null) { - throw new SemanticException("Owner type can't be null in alter table set owner command"); - } - - if (ownerPrincipal.getName() == null) { - throw new SemanticException("Owner name can't be null in alter table set owner command"); - } - - AlterTableSetOwnerDesc alterTblDesc = new AlterTableSetOwnerDesc(tableName, ownerPrincipal); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); - } - - /** - * Utility class to resolve QualifiedName - */ - static class QualifiedNameUtil { - - /** - * Get the fully qualified name in the ast. e.g. the ast of the form ^(DOT - * ^(DOT a b) c) will generate a name of the form a.b.c - * - * @param ast - * The AST from which the qualified name has to be extracted - * @return String - */ - static public String getFullyQualifiedName(ASTNode ast) { - if (ast.getChildCount() == 0) { - return ast.getText(); - } else if (ast.getChildCount() == 2) { - return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "." - + getFullyQualifiedName((ASTNode) ast.getChild(1)); - } else if (ast.getChildCount() == 3) { - return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "." - + getFullyQualifiedName((ASTNode) ast.getChild(1)) + "." - + getFullyQualifiedName((ASTNode) ast.getChild(2)); - } else { - return null; - } - } - } - - private void analyzeAlterTableRename(TableName source, ASTNode ast, boolean expectView) - throws SemanticException { - final TableName target = getQualifiedTableName((ASTNode) ast.getChild(0)); - - AlterTableRenameDesc alterTblDesc = new AlterTableRenameDesc(source, null, expectView, target.getDbTable()); - Table table = getTable(source.getDbTable(), true); - if (AcidUtils.isTransactionalTable(table)) { - setAcidDdlDesc(alterTblDesc); - } - addInputsOutputsAlterTable(source, null, alterTblDesc, alterTblDesc.getType(), false); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); - } - - /** - * Rewrite the metadata for one or more partitions in a table. Useful when - * an external process modifies files on HDFS and you want the pre/post - * hooks to be fired for the specified partition. - * - * @param ast - * The parsed command tree. 
-   * @throws SemanticException
-   *           Parsing failed
-   */
-  private void analyzeAlterTableTouch(TableName tName, CommonTree ast) throws SemanticException {
-
-    Table tab = getTable(tName);
-    validateAlterTableType(tab, AlterTableType.TOUCH);
-    inputs.add(new ReadEntity(tab));
-
-    // partition name to value
-    List<Map<String, String>> partSpecs = getPartitionSpecs(tab, ast);
-
-    if (partSpecs.isEmpty()) {
-      AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(tName.getDbTable(), null);
-      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
-      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
-    } else {
-      PartitionUtils.addTablePartsOutputs(db, outputs, tab, partSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK);
-      for (Map<String, String> partSpec : partSpecs) {
-        AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(tName.getDbTable(), partSpec);
-        rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
-      }
-    }
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f46739eb67..8b6c6b7622 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -108,8 +108,8 @@
 import org.apache.hadoop.hive.ql.ddl.table.constraint.ConstraintsUtils;
 import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc;
 import org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc;
-import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableUnsetPropertiesDesc;
 import org.apache.hadoop.hive.ql.ddl.table.misc.PreInsertTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableUnsetPropertiesDesc;
 import org.apache.hadoop.hive.ql.ddl.table.storage.skewed.SkewedTableUtils;
 import org.apache.hadoop.hive.ql.ddl.view.create.CreateViewDesc;
 import org.apache.hadoop.hive.ql.ddl.view.materialized.update.MaterializedViewUpdateDesc;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 9748fbddc2..9cb3e7044c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -83,32 +83,13 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t
       return new ReplicationSemanticAnalyzer(queryState);
     case HiveParser.TOK_REPL_STATUS:
       return new ReplicationSemanticAnalyzer(queryState);
-    case HiveParser.TOK_ALTERTABLE: {
-      Tree child = tree.getChild(1);
-      queryState.setCommandType(HiveOperation.operationForToken(child.getType()));
-      return new DDLSemanticAnalyzer(queryState);
-    }
     case HiveParser.TOK_ALTERVIEW: {
       Tree child = tree.getChild(1);
-      switch (child.getType()) {
-      case HiveParser.TOK_ALTERVIEW_PROPERTIES:
-      case HiveParser.TOK_ALTERVIEW_DROPPROPERTIES:
-      case HiveParser.TOK_ALTERVIEW_RENAME:
-        opType = HiveOperation.operationForToken(child.getType());
-        queryState.setCommandType(opType);
-        return new DDLSemanticAnalyzer(queryState);
-      }
       // TOK_ALTERVIEW_AS
       assert child.getType() == HiveParser.TOK_QUERY;
       queryState.setCommandType(HiveOperation.ALTERVIEW_AS);
       return new SemanticAnalyzer(queryState);
     }
-    case HiveParser.TOK_MSCK:
-    case HiveParser.TOK_SHOWDBLOCKS:
-    case HiveParser.TOK_SHOWCONF:
-    case HiveParser.TOK_TRUNCATETABLE:
-      return new DDLSemanticAnalyzer(queryState);
-
     case HiveParser.TOK_ANALYZE:
       return new ColumnStatsSemanticAnalyzer(queryState);
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
index 82e50ff442..7a4cb93c12 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
-import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableRenameDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.rename.AlterTableRenameDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
index 25e524af37..6c3a7ebb0e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
-import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.truncate.TruncateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
index 35b8e0e684..2b12be4c46 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
@@ -20,7 +20,7 @@
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
-import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.misc.truncate.TruncateTableDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;