diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 865aae6bca..f92478c48b 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -28,8 +28,8 @@ import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; import org.apache.hadoop.hive.ql.ddl.table.info.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablesDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.drop.AlterTableDropPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.show.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableSetLocationDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 0d64780f96..39d876802a 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -60,7 +60,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.ddl.DDLTask; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.exec.MoveTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java index fa20f23815..c040b40d93 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/metadata/CacheMetadataAnalyzer.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; import org.apache.hadoop.hive.ql.ddl.function.AbstractFunctionAnalyzer; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -50,7 +51,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException { // In 2 cases out of 3, we could pass the path and type directly to metastore... 
     if (AnalyzeCommandUtils.isPartitionLevelStats(root)) {
       Map<String, String> partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(table, root, conf);
-      Partition part = getPartition(table, partSpec, true);
+      Partition part = PartitionUtils.getPartition(db, table, partSpec, true);
       desc = new CacheMetadataDesc(table.getDbName(), table.getTableName(), part.getName());
       inputs.add(new ReadEntity(part));
     } else {
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
index 1adcef655f..105636e340 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
@@ -112,7 +113,7 @@ protected void addInputsOutputsAlterTable(TableName tableName, Map
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java
new file mode 100644
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java
+  public static void validatePartitions(HiveConf conf, Map<String, String> partitionSpec) throws SemanticException {
+    Set<String> reservedPartitionValues = new HashSet<>();
+    // Partition can't have this name
+    reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME));
+    reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME));
+    // Partition value can't end in this suffix
+    reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL));
+    reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED));
+    reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED));
+
+    for (Entry<String, String> e : partitionSpec.entrySet()) {
+      for (String s : reservedPartitionValues) {
+        String value = e.getValue();
+        if (value != null && value.contains(s)) {
+          throw new SemanticException(ErrorMsg.RESERVED_PART_VAL.getMsg(
+              "(User value: " + e.getValue() + " Reserved substring: " + s + ")"));
+        }
+      }
+    }
+  }
+
+  public static ExprNodeGenericFuncDesc makeBinaryPredicate(String fn, ExprNodeDesc left, ExprNodeDesc right)
+      throws SemanticException {
+    return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+        FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(left, right));
+  }
+
+  public static ExprNodeGenericFuncDesc makeUnaryPredicate(String fn, ExprNodeDesc arg) throws SemanticException {
+    return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+        FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(arg));
+  }
+
+  public static Partition getPartition(Hive db, Table table, Map<String, String> partitionSpec, boolean throwException)
+      throws SemanticException {
+    Partition partition;
+    try {
+      partition = db.getPartition(table, partitionSpec, false);
+    } catch (Exception e) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec), e);
+    }
+    if (partition == null && throwException) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec));
+    }
+    return partition;
+  }
+
+  public static List<Partition> getPartitions(Hive db, Table table, Map<String, String> partitionSpec,
+      boolean throwException) throws SemanticException {
+    List<Partition> partitions;
+    try {
+      partitions = partitionSpec == null ?
+          db.getPartitions(table) : db.getPartitions(table, partitionSpec);
+    } catch (Exception e) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec), e);
+    }
+    if (partitions.isEmpty() && throwException) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partitionSpec));
+    }
+    return partitions;
+  }
+
+  private static String toMessage(ErrorMsg message, Object detail) {
+    return detail == null ? message.getMsg() : message.getMsg(detail.toString());
+  }
+
+  /**
+   * Add the table partitions to be modified in the output, so that it is available for the pre-execution hook.
+   */
+  public static void addTablePartsOutputs(Hive db, Set<WriteEntity> outputs, Table table,
+      List<Map<String, String>> partitionSpecs, boolean allowMany, WriteEntity.WriteType writeType)
+      throws SemanticException {
+    for (Map<String, String> partitionSpec : partitionSpecs) {
+      List<Partition> parts = null;
+      if (allowMany) {
+        try {
+          parts = db.getPartitions(table, partitionSpec);
+        } catch (HiveException e) {
+          LOG.error("Got HiveException during obtaining list of partitions" + StringUtils.stringifyException(e));
+          throw new SemanticException(e.getMessage(), e);
+        }
+      } else {
+        parts = new ArrayList<Partition>();
+        try {
+          Partition p = db.getPartition(table, partitionSpec, false);
+          if (p != null) {
+            parts.add(p);
+          }
+        } catch (HiveException e) {
+          LOG.debug("Wrong specification" + StringUtils.stringifyException(e));
+          throw new SemanticException(e.getMessage(), e);
+        }
+      }
+      for (Partition p : parts) {
+        // Don't request any locks here, as the table has already been locked.
+        outputs.add(new WriteEntity(p, writeType));
+      }
+    }
+  }
+}
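// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: how an analyzer would call the
// new PartitionUtils helpers. The spec values below are hypothetical.
//
//   Map<String, String> spec = new HashMap<>();
//   spec.put("ds", "2020-01-01");
//   PartitionUtils.validatePartitions(conf, spec);                    // rejects reserved names/suffixes
//   Partition p = PartitionUtils.getPartition(db, table, spec, true); // throws on missing partition
//
//   // Building a boolean filter over a partition column, as the drop-partition
//   // analyzer further down does:
//   ExprNodeColumnDesc ds = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "ds", null, true);
//   ExprNodeGenericFuncDesc eq = PartitionUtils.makeBinaryPredicate("=",
//       ds, new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "2020-01-01"));
// ---------------------------------------------------------------------------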
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
new file mode 100644
index 0000000000..e1c8718533
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition.add;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for add partition commands.
+ */
+abstract class AbstractAddPartitionAnalyzer extends AbstractAlterTableAnalyzer {
+  AbstractAddPartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    Table table = getTable(tableName);
+    validateAlterTableType(table, AlterTableType.ADDPARTITION, expectView());
+
+    boolean ifNotExists = command.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS;
+    outputs.add(new WriteEntity(table,
+        /* use DDL_EXCLUSIVE to cause X lock to prevent races between concurrent add partition calls with IF NOT EXISTS.
+         * w/o this 2 concurrent calls to add the same partition may both add data since for transactional tables
+         * creating partition metadata and moving data there are 2 separate actions. */
+        ifNotExists && AcidUtils.isTransactionalTable(table) ?
+            WriteType.DDL_EXCLUSIVE : WriteEntity.WriteType.DDL_SHARED));
+
+    List<AlterTableAddPartitionDesc.PartitionDesc> partitions = createPartitions(command, table, ifNotExists);
+    if (partitions.isEmpty()) { // nothing to do
+      return;
+    }
+
+    AlterTableAddPartitionDesc desc = new AlterTableAddPartitionDesc(table.getDbName(), table.getTableName(),
+        ifNotExists, partitions);
+    Task<?> ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
+    rootTasks.add(ddlTask);
+
+    postProcess(tableName, table, desc, ddlTask);
+  }
+
+  protected abstract boolean expectView();
+
+  private List<AlterTableAddPartitionDesc.PartitionDesc> createPartitions(ASTNode command, Table table,
+      boolean ifNotExists) throws SemanticException {
+    String currentLocation = null;
+    Map<String, String> currentPart = null;
+    List<AlterTableAddPartitionDesc.PartitionDesc> partitions = new ArrayList<>();
+    for (int num = ifNotExists ?
+        1 : 0; num < command.getChildCount(); num++) {
+      ASTNode child = (ASTNode) command.getChild(num);
+      switch (child.getToken().getType()) {
+      case HiveParser.TOK_PARTSPEC:
+        if (currentPart != null) {
+          partitions.add(createPartitionDesc(table, currentLocation, currentPart));
+          currentLocation = null;
+        }
+        currentPart = getValidatedPartSpec(table, child, conf, true);
+        PartitionUtils.validatePartitions(conf, currentPart); // validate reserved values
+        break;
+      case HiveParser.TOK_PARTITIONLOCATION:
+        // if location specified, set in partition
+        if (!allowLocation()) {
+          throw new SemanticException("LOCATION clause illegal for view partition");
+        }
+        currentLocation = unescapeSQLString(child.getChild(0).getText());
+        inputs.add(toReadEntity(currentLocation));
+        break;
+      default:
+        throw new SemanticException("Unknown child: " + child);
+      }
+    }
+
+    if (currentPart != null) { // add the last one
+      partitions.add(createPartitionDesc(table, currentLocation, currentPart));
+    }
+
+    return partitions;
+  }
+
+  private AlterTableAddPartitionDesc.PartitionDesc createPartitionDesc(Table table, String location,
+      Map<String, String> partitionSpec) {
+    Map<String, String> params = null;
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER) && location == null) {
+      params = new HashMap<String, String>();
+      StatsSetupConst.setStatsStateForCreateTable(params,
+          MetaStoreUtils.getColumnNames(table.getCols()), StatsSetupConst.TRUE);
+    }
+    return new AlterTableAddPartitionDesc.PartitionDesc(partitionSpec, location, params);
+  }
+
+  protected abstract boolean allowLocation();
+
+  protected abstract void postProcess(TableName tableName, Table table, AlterTableAddPartitionDesc desc,
+      Task<?> ddlTask) throws SemanticException;
+}
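// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: what createPartitionDesc() above
// attaches when hive.stats.autogather is enabled and no LOCATION was given.
// The table handle is assumed to be in scope.
//
//   Map<String, String> params = new HashMap<String, String>();
//   StatsSetupConst.setStatsStateForCreateTable(params,
//       MetaStoreUtils.getColumnNames(table.getCols()), StatsSetupConst.TRUE);
//   // params now marks the new (empty) partition's basic and column stats as
//   // accurate, so stats consumers don't treat the fresh partition as stale.
// ---------------------------------------------------------------------------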
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionAnalyzer.java
new file mode 100644
index 0000000000..184dced0cc
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionAnalyzer.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition.add;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
+import org.apache.hadoop.hive.ql.plan.MoveWork;
+
+/**
+ * Analyzer for add partition commands for tables.
+ */
+@DDLType(type=HiveParser.TOK_ALTERTABLE_ADDPARTS)
+public class AlterTableAddPartitionAnalyzer extends AbstractAddPartitionAnalyzer {
+  public AlterTableAddPartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean expectView() {
+    return false;
+  }
+
+  @Override
+  protected boolean allowLocation() {
+    return true;
+  }
+
+  /**
+   * Add partition for Transactional tables needs to add (copy/rename) the data so that it lands
+   * in a delta_x_x/ folder in the partition dir.
+   */
+  @Override
+  protected void postProcess(TableName tableName, Table table, AlterTableAddPartitionDesc desc, Task<?> ddlTask)
+      throws SemanticException {
+    if (!AcidUtils.isTransactionalTable(table)) {
+      return;
+    }
+
+    Long writeId = null;
+    int stmtId = 0;
+
+    for (AlterTableAddPartitionDesc.PartitionDesc partitionDesc : desc.getPartitions()) {
+      if (partitionDesc.getLocation() != null) {
+        AcidUtils.validateAcidPartitionLocation(partitionDesc.getLocation(), conf);
+        if (desc.isIfNotExists()) {
+          //Don't add partition data if it already exists
+          Partition oldPart = PartitionUtils.getPartition(db, table, partitionDesc.getPartSpec(), false);
+          if (oldPart != null) {
+            continue;
+          }
+        }
+
+        if (writeId == null) {
+          // so that we only allocate a writeId if actually adding data (vs. adding a partition w/o data)
+          try {
+            writeId = getTxnMgr().getTableWriteId(table.getDbName(), table.getTableName());
+          } catch (LockException ex) {
+            throw new SemanticException("Failed to allocate the write id", ex);
+          }
+          stmtId = getTxnMgr().getStmtIdAndIncrement();
+        }
+        LoadTableDesc loadTableWork = new LoadTableDesc(new Path(partitionDesc.getLocation()),
+            Utilities.getTableDesc(table), partitionDesc.getPartSpec(),
+            LoadTableDesc.LoadFileType.KEEP_EXISTING, //not relevant - creating new partition
+            writeId);
+        loadTableWork.setStmtId(stmtId);
+        loadTableWork.setInheritTableSpecs(true);
+        try {
+          partitionDesc.setLocation(new Path(table.getDataLocation(),
+              Warehouse.makePartPath(partitionDesc.getPartSpec())).toString());
+        } catch (MetaException ex) {
+          throw new SemanticException("Could not determine partition path due to: " + ex.getMessage(), ex);
+        }
+        Task<MoveWork> moveTask = TaskFactory.get(
+            new MoveWork(getInputs(), getOutputs(), loadTableWork, null,
+                true, //make sure to check format
+                false)); //is this right?
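// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the location rewrite just above
// discards the user-supplied LOCATION as the final partition path and rebuilds
// it in the standard layout under the table; the MoveTask chained below then
// copies the data into a delta directory there. The sample spec is hypothetical.
//
//   Map<String, String> spec = new LinkedHashMap<>();
//   spec.put("ds", "2020-01-01");
//   spec.put("hr", "11");
//   String partPath = Warehouse.makePartPath(spec);  // roughly "ds=2020-01-01/hr=11"
//   // final location: <table location>/ds=2020-01-01/hr=11, with the moved files
//   // landing in a delta_<writeId>_<writeId>/ subdirectory for ACID readers.
// ---------------------------------------------------------------------------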
+ ddlTask.addDependentTask(moveTask); + } + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java index 9339144408..61af383141 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.add; import java.io.Serializable; import java.util.List; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionOperation.java similarity index 99% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionOperation.java index 0adccf6740..6910e100b0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.add; import java.util.ArrayList; import java.util.BitSet; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterViewAddPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterViewAddPartitionAnalyzer.java new file mode 100644 index 0000000000..2e69325c3a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterViewAddPartitionAnalyzer.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition.add;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.metadata.HiveUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for add partition commands for views.
+ */
+@DDLType(type=HiveParser.TOK_ALTERVIEW_ADDPARTS)
+public class AlterViewAddPartitionAnalyzer extends AbstractAddPartitionAnalyzer {
+  public AlterViewAddPartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean expectView() {
+    return true;
+  }
+
+  @Override
+  protected boolean allowLocation() {
+    return false;
+  }
+
+  private static final String VIEW_VALIDATE_QUERY =
+      "SELECT *%n" +
+      "  FROM %s%n" +
+      "  WHERE %s";
+
+  @Override
+  protected void postProcess(TableName tableName, Table table, AlterTableAddPartitionDesc desc, Task<?> ddlTask)
+      throws SemanticException {
+    // Compile internal query to capture underlying table partition dependencies
+    String dbTable = HiveUtils.unparseIdentifier(tableName.getDb()) + "." +
+        HiveUtils.unparseIdentifier(tableName.getTable());
+
+    StringBuilder where = new StringBuilder();
+    boolean firstOr = true;
+    for (AlterTableAddPartitionDesc.PartitionDesc partitionDesc : desc.getPartitions()) {
+      if (firstOr) {
+        firstOr = false;
+      } else {
+        where.append(" OR ");
+      }
+      boolean firstAnd = true;
+      where.append("(");
+      for (Map.Entry<String, String> entry : partitionDesc.getPartSpec().entrySet()) {
+        if (firstAnd) {
+          firstAnd = false;
+        } else {
+          where.append(" AND ");
+        }
+        where.append(HiveUtils.unparseIdentifier(entry.getKey()));
+        where.append(" = '");
+        where.append(HiveUtils.escapeString(entry.getValue()));
+        where.append("'");
+      }
+      where.append(")");
+    }
+
+    String query = String.format(VIEW_VALIDATE_QUERY, dbTable, where.toString());
+    // FIXME: is it ok to have a completely new querystate?
+    try (Driver driver = new Driver(QueryState.getNewQueryState(conf, queryState.getLineageState()))) {
+      int rc = driver.compile(query, false);
+      if (rc != 0) {
+        throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg());
+      }
+      inputs.addAll(driver.getPlan().getInputs());
+    }
+  }
+}
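// ---------------------------------------------------------------------------
// Illustrative example, not part of the patch: for the hypothetical command
//   ALTER VIEW v ADD PARTITION (ds='2020-01-01') PARTITION (ds='2020-01-02')
// the postProcess() above formats and compiles (but never runs) the probe query
//
//   SELECT *
//     FROM `default`.`v`
//     WHERE (ds = '2020-01-01') OR (ds = '2020-01-02')
//
// solely so that the underlying base-table partitions appear in the compiled
// plan's inputs.
// ---------------------------------------------------------------------------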
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/package-info.java
new file mode 100644
index 0000000000..e117e53752
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Add partition DDL operation. */
+package org.apache.hadoop.hive.ql.ddl.table.partition.add;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionAnalyzer.java
new file mode 100644
index 0000000000..dfdb7653f0
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionAnalyzer.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition.alter;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for alter partition commands.
+ */
+@DDLType(type=HiveParser.TOK_ALTERTABLE_PARTCOLTYPE)
+public class AlterTableAlterPartitionAnalyzer extends AbstractAlterTableAnalyzer {
+  public AlterTableAlterPartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    Table table = getTable(tableName);
+    validateAlterTableType(table, AlterTableType.ALTERPARTITION, false);
+    inputs.add(new ReadEntity(table));
+
+    // Alter table ... partition column ( column newtype) only takes one column at a time.
+    ASTNode colAst = (ASTNode) command.getChild(0);
+    String name = colAst.getChild(0).getText().toLowerCase();
+    String type = getTypeStringFromAST((ASTNode) (colAst.getChild(1)));
+    String comment = (colAst.getChildCount() == 3) ?
unescapeSQLString(colAst.getChild(2).getText()) : null; + + FieldSchema newCol = new FieldSchema(unescapeIdentifier(name), type, comment); + + boolean isDefined = false; + for (FieldSchema col : table.getTTable().getPartitionKeys()) { + if (col.getName().compareTo(newCol.getName()) == 0) { + isDefined = true; + } + } + if (!isDefined) { + throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName())); + } + + AlterTableAlterPartitionDesc desc = new AlterTableAlterPartitionDesc(tableName.getNotEmptyDbTable(), newCol); + if (AcidUtils.isTransactionalTable(table)) { + setAcidDdlDesc(desc); + } + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionDesc.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionDesc.java index 5a8c802051..8da3bc38c7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.alter; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java index b97cef04b8..2046cbdb43 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.alter; import java.util.ArrayList; import java.util.List; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/package-info.java new file mode 100644 index 0000000000..9a108e5770 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Alter partition DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.table.partition.alter; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AbstractDropPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AbstractDropPartitionAnalyzer.java new file mode 100644 index 0000000000..ae5048d527 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AbstractDropPartitionAnalyzer.java @@ -0,0 +1,278 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition.drop; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.antlr.runtime.tree.CommonTree; +import org.antlr.runtime.tree.Tree; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.InvalidTableException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.TypeCheckCtx; +import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import 
org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Analyzer for drop partition commands.
+ */
+abstract class AbstractDropPartitionAnalyzer extends AbstractAlterTableAnalyzer {
+  AbstractDropPartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+
+    boolean ifExists = (command.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null)
+        || HiveConf.getBoolVar(conf, ConfVars.DROP_IGNORES_NON_EXISTENT);
+    // If the drop has to fail on non-existent partitions, we cannot batch expressions.
+    // That is because we actually have to check each separate expression for existence.
+    // We could do a small optimization for the case where expr has all columns and all
+    // operators are equality, if we assume those would always match one partition (which
+    // may not be true with legacy, non-normalized column values). This is probably a
+    // popular case but that's kinda hacky. Let's not do it for now.
+    boolean canGroupExprs = ifExists;
+
+    boolean mustPurge = (command.getFirstChildWithType(HiveParser.KW_PURGE) != null);
+    ReplicationSpec replicationSpec = new ReplicationSpec(command);
+
+    Table table = null;
+    try {
+      table = getTable(tableName);
+    } catch (SemanticException se) {
+      if (replicationSpec.isInReplicationScope() &&
+          ((se.getCause() instanceof InvalidTableException)
+              || (se.getMessage().contains(ErrorMsg.INVALID_TABLE.getMsg())))) {
+        // If we're inside a replication scope, then the table not existing is not an error.
+        // We just return in that case, no drop needed.
+        return;
+        // TODO : the contains message check is fragile, we should refactor SemanticException to be
+        // queriable for error code, and not simply have a message
+        // NOTE : IF_EXISTS might also want to invoke this, but there's a good possibility
+        // that IF_EXISTS is stricter about table existence, and applies only to the ptn.
+        // Therefore, ignoring IF_EXISTS here.
+      } else {
+        throw se;
+      }
+    }
+    Map<Integer, List<ExprNodeGenericFuncDesc>> partitionSpecs = getFullPartitionSpecs(command, table, canGroupExprs);
+    if (partitionSpecs.isEmpty()) { // nothing to do
+      return;
+    }
+
+    validateAlterTableType(table, AlterTableType.DROPPARTITION, expectView());
+    ReadEntity re = new ReadEntity(table);
+    re.noLockNeeded();
+    inputs.add(re);
+
+    addTableDropPartsOutputs(table, partitionSpecs.values(), !ifExists);
+
+    AlterTableDropPartitionDesc desc =
+        new AlterTableDropPartitionDesc(tableName, partitionSpecs, mustPurge, replicationSpec);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+  }
+
+  /**
+   * Get the partition specs from the tree. This stores the full specification
+   * with the comparator operator into the output list.
+   *
+   * @return Map of partitions by prefix length. Most of the time prefix length will
+   *         be the same for all partition specs, so we can just OR the expressions.
+   */
+  private Map<Integer, List<ExprNodeGenericFuncDesc>> getFullPartitionSpecs(
+      CommonTree ast, Table table, boolean canGroupExprs) throws SemanticException {
+    String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);
+    Map<String, String> colTypes = new HashMap<>();
+    for (FieldSchema fs : table.getPartitionKeys()) {
+      colTypes.put(fs.getName().toLowerCase(), fs.getType());
+    }
+
+    Map<Integer, List<ExprNodeGenericFuncDesc>> result = new HashMap<>();
+    for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
+      Tree partSpecTree = ast.getChild(childIndex);
+      if (partSpecTree.getType() != HiveParser.TOK_PARTSPEC) {
+        continue;
+      }
+
+      ExprNodeGenericFuncDesc expr = null;
+      Set<String> names = new HashSet<>(partSpecTree.getChildCount());
+      for (int i = 0; i < partSpecTree.getChildCount(); ++i) {
+        CommonTree partSpecSingleKey = (CommonTree) partSpecTree.getChild(i);
+        assert (partSpecSingleKey.getType() == HiveParser.TOK_PARTVAL);
+        String key = stripIdentifierQuotes(partSpecSingleKey.getChild(0).getText()).toLowerCase();
+        String operator = partSpecSingleKey.getChild(1).getText();
+        ASTNode partValNode = (ASTNode) partSpecSingleKey.getChild(2);
+        TypeCheckCtx typeCheckCtx = new TypeCheckCtx(null);
+        ExprNodeConstantDesc valExpr =
+            (ExprNodeConstantDesc) TypeCheckProcFactory.genExprNode(partValNode, typeCheckCtx).get(partValNode);
+        Object val = valExpr.getValue();
+
+        boolean isDefaultPartitionName = val.equals(defaultPartitionName);
+
+        String type = colTypes.get(key);
+        if (type == null) {
+          throw new SemanticException("Column " + key + " not found");
+        }
+        PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type);
+        // Create the corresponding hive expression to filter on partition columns.
+        if (!isDefaultPartitionName) {
+          if (!valExpr.getTypeString().equals(type)) {
+            Converter converter = ObjectInspectorConverters.getConverter(
+                TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(valExpr.getTypeInfo()),
+                TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(pti));
+            val = converter.convert(valExpr.getValue());
+          }
+        }
+
+        ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true);
+        ExprNodeGenericFuncDesc op;
+        if (!isDefaultPartitionName) {
+          op = PartitionUtils.makeBinaryPredicate(operator, column, new ExprNodeConstantDesc(pti, val));
+        } else {
+          GenericUDF originalOp = FunctionRegistry.getFunctionInfo(operator).getGenericUDF();
+          String fnName;
+          if (FunctionRegistry.isEq(originalOp)) {
+            fnName = "isnull";
+          } else if (FunctionRegistry.isNeq(originalOp)) {
+            fnName = "isnotnull";
+          } else {
+            throw new SemanticException(
+                "Cannot use " + operator + " in a default partition spec; only '=' and '!=' are allowed.");
+          }
+          op = PartitionUtils.makeUnaryPredicate(fnName, column);
+        }
+        // If it's multi-expr filter (e.g. a='5', b='2012-01-02'), AND with previous exprs.
+        expr = (expr == null) ? op : PartitionUtils.makeBinaryPredicate("and", expr, op);
+        names.add(key);
+      }
+
+      if (expr == null) {
+        continue;
+      }
+
+      // We got the expr for one full partition spec. Determine the prefix length.
+      int prefixLength = calculatePartPrefix(table, names);
+      List<ExprNodeGenericFuncDesc> orExpr = result.get(prefixLength);
+      // We have to tell apart partitions resulting from spec with different prefix lengths.
+      // So, if we already have smth for the same prefix length, we can OR the two.
+      // If we don't, create a new separate filter. In most cases there will only be one.
+      if (orExpr == null) {
+        result.put(prefixLength, Lists.newArrayList(expr));
+      } else if (canGroupExprs) {
+        orExpr.set(0, PartitionUtils.makeBinaryPredicate("or", expr, orExpr.get(0)));
+      } else {
+        orExpr.add(expr);
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Calculates the partition prefix length based on the drop spec.
+   * This is used to avoid deleting archived partitions with lower level.
+   * For example, if, for A and B key cols, drop spec is A=5, B=6, we shouldn't drop
+   * archived A=5/, because it can contain B-s other than 6.
+   */
+  private int calculatePartPrefix(Table tbl, Set<String> partSpecKeys) {
+    int partPrefixToDrop = 0;
+    for (FieldSchema fs : tbl.getPartCols()) {
+      if (!partSpecKeys.contains(fs.getName())) {
+        break;
+      }
+      ++partPrefixToDrop;
+    }
+    return partPrefixToDrop;
+  }
+
+  protected abstract boolean expectView();
+
+  /**
+   * Add the table partitions to be modified in the output, so that it is available for the
+   * pre-execution hook. If the partition does not exist, throw an error if
+   * throwIfNonExistent is true, otherwise ignore it.
+   */
+  private void addTableDropPartsOutputs(Table tab, Collection<List<ExprNodeGenericFuncDesc>> partitionSpecs,
+      boolean throwIfNonExistent) throws SemanticException {
+    for (List<ExprNodeGenericFuncDesc> specs : partitionSpecs) {
+      for (ExprNodeGenericFuncDesc partitionSpec : specs) {
+        List<Partition> parts = new ArrayList<>();
+
+        boolean hasUnknown = false;
+        try {
+          hasUnknown = db.getPartitionsByExpr(tab, partitionSpec, conf, parts);
+        } catch (Exception e) {
+          throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partitionSpec.getExprString()), e);
+        }
+        if (hasUnknown) {
+          throw new SemanticException("Unexpected unknown partitions for " + partitionSpec.getExprString());
+        }
+
+        // TODO: ifExists could be moved to metastore. In fact it already supports that. Check it
+        //       for now since we get parts for output anyway, so we can get the error message
+        //       earlier... If we get rid of output, we can get rid of this.
+        if (parts.isEmpty()) {
+          if (throwIfNonExistent) {
+            throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partitionSpec.getExprString()));
+          }
+        }
+        for (Partition p : parts) {
+          outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_EXCLUSIVE));
+        }
+      }
+    }
+  }
+}
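// ---------------------------------------------------------------------------
// Illustrative example, not part of the patch: how getFullPartitionSpecs() above
// buckets expressions for a table partitioned by (a, b), given the hypothetical
//   ALTER TABLE t DROP IF EXISTS PARTITION (a = '5', b = '6'), PARTITION (a = '7')
//
//   (a = '5' AND b = '6')  -> prefix length 2
//   (a = '7')              -> prefix length 1
//   result: {2 -> [(a='5' AND b='6')], 1 -> [(a='7')]}
//
// Specs with equal prefix lengths may be OR-ed into one expression (the IF EXISTS
// case), but the two buckets stay separate: dropping the whole a='7' directory
// level must not be conflated with dropping a single (a, b) leaf partition.
// ---------------------------------------------------------------------------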
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionAnalyzer.java
new file mode 100644
index 0000000000..5d415f645d
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionAnalyzer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for drop partition commands for tables.
+ */
+@DDLType(type=HiveParser.TOK_ALTERTABLE_DROPPARTS)
+public class AlterTableDropPartitionAnalyzer extends AbstractDropPartitionAnalyzer {
+  public AlterTableDropPartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean expectView() {
+    return false;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionDesc.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionDesc.java
index f5dc34200c..dbb328955a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionDesc.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.ddl.table.partition;
+package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
 
 import java.io.Serializable;
 import java.util.ArrayList;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
similarity index 93%
rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java
rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
index 6f0dfba398..9d186db7af 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionOperation.java
@@ -16,24 +16,25 @@
  * limitations under the License.
 */
 
-package org.apache.hadoop.hive.ql.ddl.table.partition;
+package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
 
 import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.ddl.DDLOperation;
 import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.HiveTableName;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 
 import com.google.common.collect.Iterables;
@@ -104,7 +105,7 @@ private void dropPartitionForReplication(Table table, ReplicationSpec replicatio
   private void dropPartitions() throws HiveException {
     // ifExists is currently verified in DDLSemanticAnalyzer
-    String[] names = Utilities.getDbTableName(desc.getTableName());
+    TableName tableName = HiveTableName.of(desc.getTableName());
     List<Pair<Integer, byte[]>> partitionExpressions = new ArrayList<>(desc.getPartSpecs().size());
     for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()) {
@@ -112,8 +113,10 @@ private void dropPartitions() throws HiveException {
           SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec())));
     }
 
-    List droppedPartitions = context.getDb().dropPartitions(names[0], names[1], partitionExpressions,
-        PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(desc.getIfPurge()));
+    PartitionDropOptions options =
+        PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(desc.getIfPurge());
+    List<Partition> droppedPartitions = context.getDb().dropPartitions(tableName.getDb(), tableName.getTable(),
+        partitionExpressions, options);
     for (Partition partition : droppedPartitions) {
       context.getConsole().printInfo("Dropped the partition " + partition.getName());
       // We have already locked the table, don't lock the partitions.
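// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the shape of the batched metastore
// request built by dropPartitions() above. Each entry pairs a spec's prefix
// length with its Kryo-serialized filter expression; expr is hypothetical.
//
//   List<Pair<Integer, byte[]>> exprs = new ArrayList<>();
//   exprs.add(Pair.of(2, SerializationUtilities.serializeExpressionToKryo(expr)));
//   context.getDb().dropPartitions(tableName.getDb(), tableName.getTable(), exprs,
//       PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(false));
// ---------------------------------------------------------------------------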
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterViewDropPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterViewDropPartitionAnalyzer.java
new file mode 100644
index 0000000000..4278d98634
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterViewDropPartitionAnalyzer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for drop partition commands for views.
+ */
+@DDLType(type=HiveParser.TOK_ALTERVIEW_DROPPARTS)
+public class AlterViewDropPartitionAnalyzer extends AbstractDropPartitionAnalyzer {
+  public AlterViewDropPartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected boolean expectView() {
+    return true;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/package-info.java
new file mode 100644
index 0000000000..d713305513
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Drop partition DDL operation. */
+package org.apache.hadoop.hive.ql.ddl.table.partition.drop;
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionAnalyzer.java
new file mode 100644
index 0000000000..702ef0b6ab
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionAnalyzer.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.partition.exchange;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for exchange partition commands.
+ */
+@DDLType(type=HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION)
+public class AlterTableExchangePartitionAnalyzer extends AbstractAlterTableAnalyzer {
+  public AlterTableExchangePartitionAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    Table destTable = getTable(tableName);
+    Table sourceTable = getTable(getUnescapedName((ASTNode) command.getChild(1)));
+
+    // Get the partition specs
+    Map<String, String> partitionSpecs = getValidatedPartSpec(sourceTable, (ASTNode) command.getChild(0), conf, false);
+    PartitionUtils.validatePartitions(conf, partitionSpecs);
+
+    boolean sameColumns = MetaStoreUtils.compareFieldColumns(destTable.getAllCols(), sourceTable.getAllCols());
+    boolean samePartitions = MetaStoreUtils.compareFieldColumns(destTable.getPartitionKeys(),
+        sourceTable.getPartitionKeys());
+    if (!sameColumns || !samePartitions) {
+      throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
+    }
+
+    // Exchange partition is not allowed with transactional tables.
+    // If only the source is a transactional table, then the target will see deleted rows too, as no snapshot
+    // isolation is applicable for non-acid tables.
+    // If only the target is a transactional table, then the data would become visible to all ongoing transactions,
+    // affecting snapshot isolation.
+    // If both source and target are transactional tables, then the target partition may end up with delta/base
+    // files whose write IDs are not valid. It may affect snapshot isolation for on-going txns as well.
+ if (AcidUtils.isTransactionalTable(sourceTable) || AcidUtils.isTransactionalTable(destTable)) { + throw new SemanticException(ErrorMsg.EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES.getMsg()); + } + + // check if source partition exists + PartitionUtils.getPartitions(db, sourceTable, partitionSpecs, true); + + // Verify that the partitions specified are continuous + // If a subpartition value is specified without specifying a partition's value then we throw an exception + int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partitionSpecs); + if (counter < 0) { + throw new SemanticException(ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partitionSpecs.toString())); + } + + List<Partition> destPartitions = null; + try { + destPartitions = PartitionUtils.getPartitions(db, destTable, partitionSpecs, true); + } catch (SemanticException ex) { + // We expect a semantic exception to be thrown here, as this partition should not be present. + } + if (destPartitions != null) { + // If any destination partition is present then throw a Semantic Exception. + throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString())); + } + + AlterTableExchangePartitionsDesc desc = + new AlterTableExchangePartitionsDesc(sourceTable, destTable, partitionSpecs); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + inputs.add(new ReadEntity(sourceTable)); + outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED)); + } + + + /** + * @return >= 0 (the number of matched partition columns) if no subpartition value is specified without its + * parent partition's value also being specified; -1 otherwise + */ + private int isPartitionValueContinuous(List<FieldSchema> partitionKeys, Map<String, String> partitionSpecs) { + int counter = 0; + for (FieldSchema partitionKey : partitionKeys) { + if (partitionSpecs.containsKey(partitionKey.getName())) { + counter++; + continue; + } + return partitionSpecs.size() == counter ? counter : -1; + } + return counter; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionsDesc.java similarity index 96% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionsDesc.java index 63adaa62cd..e21c5ae939 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionsDesc.java @@ -16,7 +16,7 @@ * limitations under the License.
*/ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.exchange; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionsOperation.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionsOperation.java index 629dda7a00..52890952cb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/AlterTableExchangePartitionsOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.exchange; import java.util.List; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/package-info.java new file mode 100644 index 0000000000..122bbdf614 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/exchange/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Exchange partition DDL operation. */ +package org.apache.hadoop.hive.ql.ddl.table.partition.exchange; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionAnalyzer.java new file mode 100644 index 0000000000..14424d3d05 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionAnalyzer.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition.rename; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for rename partition commands. + */ +@DDLType(type=HiveParser.TOK_ALTERTABLE_RENAMEPART) +public class AlterTableRenamePartitionAnalyzer extends AbstractAlterTableAnalyzer { + public AlterTableRenamePartitionAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) + throws SemanticException { + Table table = getTable(tableName, true); + validateAlterTableType(table, AlterTableType.RENAMEPARTITION, false); + + Map<String, String> newPartitionSpec = getValidatedPartSpec(table, (ASTNode)command.getChild(0), conf, false); + if (newPartitionSpec == null) { + throw new SemanticException("RENAME PARTITION Missing Destination" + command); + } + ReadEntity re = new ReadEntity(table); + re.noLockNeeded(); + inputs.add(re); + + List<Map<String, String>> allPartitionSpecs = new ArrayList<>(); + allPartitionSpecs.add(partitionSpec); + allPartitionSpecs.add(newPartitionSpec); + PartitionUtils.addTablePartsOutputs(db, outputs, table, allPartitionSpecs, false, + WriteEntity.WriteType.DDL_EXCLUSIVE); + + AlterTableRenamePartitionDesc desc = new AlterTableRenamePartitionDesc(tableName, partitionSpec, newPartitionSpec, + null, table); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + + if (AcidUtils.isTransactionalTable(table)) { + setAcidDdlDesc(desc); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionDesc.java similarity index 98% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionDesc.java index f7e38c3a29..e712d03bd1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionDesc.java @@ -16,7 +16,7 @@ * limitations under the License.
*/ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.rename; import java.io.Serializable; import java.util.LinkedHashMap; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionOperation.java similarity index 94% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionOperation.java index 8f9a530083..4eff7c1714 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/AlterTableRenamePartitionOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.rename; import java.util.ArrayList; import java.util.Map; @@ -26,13 +26,13 @@ import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.ddl.table.AlterTableUtils; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; @@ -58,8 +58,7 @@ public int execute() throws HiveException { return 0; } - String[] names = Utilities.getDbTableName(tableName); - if (Utils.isBootstrapDumpInProgress(context.getDb(), names[0])) { + if (Utils.isBootstrapDumpInProgress(context.getDb(), HiveTableName.of(tableName).getDb())) { LOG.error("DDLTask: Rename Partition not allowed as bootstrap dump in progress"); throw new HiveException("Rename Partition: Not allowed as bootstrap dump in progress"); } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/package-info.java new file mode 100644 index 0000000000..cebcf9cdf8 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/rename/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Rename partition DDL operation. 
*/ +package org.apache.hadoop.hive.ql.ddl.table.partition.rename; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java new file mode 100644 index 0000000000..2466577d94 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition.show; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.HiveTableName; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Analyzer for show partition commands. + */ +@DDLType(type=HiveParser.TOK_SHOWPARTITIONS) +public class ShowPartitionAnalyzer extends BaseSemanticAnalyzer { + public ShowPartitionAnalyzer(QueryState queryState) throws SemanticException { + super(queryState); + } + + @Override + public void analyzeInternal(ASTNode ast) throws SemanticException { + ctx.setResFile(ctx.getLocalTmpPath()); + + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + + List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast); + assert (partSpecs.size() <= 1); + Map<String, String> partSpec = (partSpecs.size() > 0) ? partSpecs.get(0) : null; + + Table table = getTable(HiveTableName.of(tableName)); + inputs.add(new ReadEntity(table)); + + ShowPartitionsDesc desc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); + Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); + rootTasks.add(task); + + task.setFetchSource(true); + setFetchTask(createFetchTask(ShowPartitionsDesc.SCHEMA)); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java index 369a5b54ed..eeef253af8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsDesc.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.show; import java.io.Serializable; import java.util.Map; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java similarity index 97% rename from ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsOperation.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java index e870528a4f..2b1a002748 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.ddl.table.partition; +package org.apache.hadoop.hive.ql.ddl.table.partition.show; import java.io.DataOutputStream; import java.util.List; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/package-info.java new file mode 100644 index 0000000000..d49c1e25c2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Show partition DDL operation.
*/ +package org.apache.hadoop.hive.ql.ddl.table.partition.show; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterViewAnalyzerCategory.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterViewAnalyzerCategory.java new file mode 100644 index 0000000000..d43dc9eef7 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/AlterViewAnalyzerCategory.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.view; + +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLSemanticAnalyzerCategory; +import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; + +/** + * Alter View category helper. It derives the actual type of the command from the root element, by selecting the type + * of the second child, as the Alter View commands have this structure: viewName command partitionSpec? + */ +@DDLType(type=HiveParser.TOK_ALTERVIEW) +public class AlterViewAnalyzerCategory implements DDLSemanticAnalyzerCategory { + @Override + public int getType(ASTNode root) { + return root.getChild(1).getType(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/package-info.java new file mode 100644 index 0000000000..a80d2cb3c8 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Create view DDL operation. 
*/ +package org.apache.hadoop.hive.ql.ddl.view.create; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java index 251193e161..b9d6679edd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hive.ql.exec.repl.bootstrap.events; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc; public interface PartitionEvent extends TableEvent { AlterTableAddPartitionDesc lastPartitionReplicated(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java index 992a4caddd..10732b0f69 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java @@ -18,7 +18,7 @@ Licensed to the Apache Software Foundation (ASF) under one package org.apache.hadoop.hive.ql.exec.repl.bootstrap.events; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java index 0c27a9965f..a79f5b7123 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java @@ -19,7 +19,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.PartitionEvent; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.TableEvent; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.ReplicationState; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java index 9bc8e97bac..b90f609a29 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.TableEvent; import 
org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java index e15dec340d..a67184d4ac 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java @@ -19,7 +19,7 @@ import java.io.Serializable; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc; public class ReplicationState implements Serializable { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index df64ea9c60..11597740e2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.ddl.DDLWork; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.drop.AlterTableDropPartitionDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index a5af560504..fc7f226d77 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec.repl.util; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.repl.ReplConst; @@ -31,12 +30,12 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableSetPropertiesDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; @@ -57,7 +56,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.io.Serializable; import static org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration.TableMigrationOption.MANAGED; @@ -123,9 +121,9 @@ String type = table.getPartColByName(key).getType(); PrimitiveTypeInfo pti = 
TypeInfoFactory.getPrimitiveTypeInfo(type); ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true); - ExprNodeGenericFuncDesc op = DDLSemanticAnalyzer.makeBinaryPredicate( + ExprNodeGenericFuncDesc op = PartitionUtils.makeBinaryPredicate( "=", column, new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, val)); - expr = (expr == null) ? op : DDLSemanticAnalyzer.makeBinaryPredicate("and", expr, op); + expr = (expr == null) ? op : PartitionUtils.makeBinaryPredicate("and", expr, op); } if (expr != null) { partitionDesc.add(expr); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 32edabccf3..b0ceecb24e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -1754,39 +1754,6 @@ protected Table getTable(String database, String tblName, boolean throwException return tab; } - protected Partition getPartition(Table table, Map partSpec, - boolean throwException) throws SemanticException { - Partition partition; - try { - partition = db.getPartition(table, partSpec, false); - } catch (Exception e) { - throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e); - } - if (partition == null && throwException) { - throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec)); - } - return partition; - } - - protected List getPartitions(Table table, Map partSpec, - boolean throwException) throws SemanticException { - List partitions; - try { - partitions = partSpec == null ? db.getPartitions(table) : - db.getPartitions(table, partSpec); - } catch (Exception e) { - throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e); - } - if (partitions.isEmpty() && throwException) { - throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec)); - } - return partitions; - } - - protected String toMessage(ErrorMsg message, Object detail) { - return detail == null ? 
message.getMsg() : message.getMsg(detail.toString()); - } - public List<Task<?>> getAllRootTasks() { return rootTasks; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index ad6817c32b..ab1ddce519 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -22,10 +22,8 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; @@ -34,7 +32,6 @@ import java.util.Set; import org.antlr.runtime.tree.CommonTree; -import org.antlr.runtime.tree.Tree; import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -50,7 +47,6 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLDesc; @@ -72,12 +68,7 @@ import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableTouchDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.AlterTableUnsetPropertiesDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAlterPartitionDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableExchangePartitionsDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableRenamePartitionDesc; -import org.apache.hadoop.hive.ql.ddl.table.partition.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableArchiveDesc; import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableClusteredByDesc; import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableCompactDesc; @@ -95,7 +86,6 @@ import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableUnarchiveDesc; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; -import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -112,7 +102,6 @@ import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -120,10 +109,6 @@ import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils; import org.apache.hadoop.hive.ql.plan.BasicStatsWork; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; -import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; -import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; @@ -133,24 +118,16 @@ import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.ValidationUtility; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; import org.apache.hadoop.mapred.InputFormat; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; - /** * DDLSemanticAnalyzer. * @@ -159,7 +136,6 @@ private static final Logger LOG = LoggerFactory.getLogger(DDLSemanticAnalyzer.class); private static final Map TokenToTypeName = new HashMap(); - private final Set reservedPartitionValues; // Equivalent to acidSinks, but for DDL operations that change data. private DDLDescWithWriteId ddlDescWithWriteId; @@ -223,14 +199,6 @@ public DDLSemanticAnalyzer(QueryState queryState) throws SemanticException { public DDLSemanticAnalyzer(QueryState queryState, Hive db) throws SemanticException { super(queryState, db); - reservedPartitionValues = new HashSet(); - // Partition can't have this name - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME)); - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME)); - // Partition value can't end in this suffix - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL)); - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED)); - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED)); } @Override @@ -265,12 +233,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { analyzeAlterTableArchive(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) { analyzeAlterTableArchive(tName, ast, true); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) { - analyzeAlterTableAddParts(tName, ast, false); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) { - analyzeAlterTableDropParts(tName, ast, false); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PARTCOLTYPE) { - analyzeAlterTablePartColType(tName, ast); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) { analyzeAlterTableProps(tName, null, ast, false, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) { @@ -280,8 +242,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { analyzeAlterTableProps(tName, partSpec, ast, false, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_SKEWED) { 
analyzeAlterTableSkewedby(tName, ast); - } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION) { - analyzeExchangePartition(tName, ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_FILEFORMAT) { analyzeAlterTableFileFormat(ast, tName, partSpec); @@ -297,8 +257,6 @@ public void analyzeInternal(ASTNode input) throws SemanticException { } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_SERDEPROPERTIES) { analyzeAlterTableSerdeProps(ast, tName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { - analyzeAlterTableRenamePart(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION) { analyzeAlterTableSkewedLocation(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_BUCKETS || @@ -358,19 +316,11 @@ public void analyzeInternal(ASTNode input) throws SemanticException { analyzeAlterTableProps(tName, null, ast, true, false); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) { analyzeAlterTableProps(tName, null, ast, true, true); - } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_ADDPARTS) { - analyzeAlterTableAddParts(tName, ast, true); - } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPARTS) { - analyzeAlterTableDropParts(tName, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_RENAME) { analyzeAlterTableRename(tName, ast, true); } break; } - case HiveParser.TOK_SHOWPARTITIONS: - ctx.setResFile(ctx.getLocalTmpPath()); - analyzeShowPartitions(ast); - break; case HiveParser.TOK_LOCKTABLE: analyzeLockTable(ast); break; @@ -426,81 +376,6 @@ private void analyzeAlterTableUpdateStats(ASTNode ast, TableName tblName, Map<String, String> partSpec) - private void analyzeExchangePartition(TableName tableName, ASTNode ast) throws SemanticException { - Table destTable = getTable(tableName); - Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(1))); - - // Get the partition specs - Map<String, String> partSpecs = getValidatedPartSpec(sourceTable, (ASTNode)ast.getChild(0), conf, false); - validatePartitionValues(partSpecs); - boolean sameColumns = MetaStoreUtils.compareFieldColumns( - destTable.getAllCols(), sourceTable.getAllCols()); - boolean samePartitions = MetaStoreUtils.compareFieldColumns( - destTable.getPartitionKeys(), sourceTable.getPartitionKeys()); - if (!sameColumns || !samePartitions) { - throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg()); - } - - // Exchange partition is not allowed with transactional tables. - // If only source is transactional table, then target will see deleted rows too as no snapshot - // isolation applicable for non-acid tables. - // If only target is transactional table, then data would be visible to all ongoing transactions - // affecting the snapshot isolation. - // If both source and targets are transactional tables, then target partition may have delta/base - // files with write IDs may not be valid. It may affect snapshot isolation for on-going txns as well.
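The validatePartitionValues(partSpecs) call in the old code above survives this refactoring as PartitionUtils.validatePartitions (see the new class in this patch): it rejects any user-supplied partition value containing one of a handful of reserved substrings. A standalone sketch of that rule; the literals below are the stock defaults of the corresponding HiveConf settings (the real list also includes the ZooKeeper default partition name), and production code reads them from the configuration:

import java.util.Map;
import java.util.Set;

public class ReservedPartitionValueDemo {
  // Assumed defaults; real code pulls these from HiveConf (ConfVars.DEFAULTPARTITIONNAME etc.).
  static final Set<String> RESERVED = Set.of(
      "__HIVE_DEFAULT_PARTITION__",   // default partition name in dynamic partitioning
      "_INTERMEDIATE_ORIGINAL",       // intermediate suffixes used while archiving
      "_INTERMEDIATE_ARCHIVED",
      "_INTERMEDIATE_EXTRACTED");

  static void validate(Map<String, String> partSpec) {
    for (Map.Entry<String, String> e : partSpec.entrySet()) {
      for (String reserved : RESERVED) {
        if (e.getValue() != null && e.getValue().contains(reserved)) {
          throw new IllegalArgumentException(
              "(User value: " + e.getValue() + " Reserved substring: " + reserved + ")");
        }
      }
    }
  }

  public static void main(String[] args) {
    validate(Map.of("ds", "2020-03-01"));  // fine
    try {
      validate(Map.of("ds", "__HIVE_DEFAULT_PARTITION__"));
    } catch (IllegalArgumentException ex) {
      System.out.println("rejected: " + ex.getMessage());
    }
  }
}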
- if (AcidUtils.isTransactionalTable(sourceTable) || AcidUtils.isTransactionalTable(destTable)) { - throw new SemanticException(ErrorMsg.EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES.getMsg()); - } - - // check if source partition exists - getPartitions(sourceTable, partSpecs, true); - - // Verify that the partitions specified are continuous - // If a subpartition value is specified without specifying a partition's value - // then we throw an exception - int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs); - if (counter < 0) { - throw new SemanticException( - ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString())); - } - List<Partition> destPartitions = null; - try { - destPartitions = getPartitions(destTable, partSpecs, true); - } catch (SemanticException ex) { - // We should expect a semantic exception being throw as this partition - // should not be present. - } - if (destPartitions != null) { - // If any destination partition is present then throw a Semantic Exception. - throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString())); - } - AlterTableExchangePartitionsDesc alterTableExchangePartition = - new AlterTableExchangePartitionsDesc(sourceTable, destTable, partSpecs); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTableExchangePartition))); - - inputs.add(new ReadEntity(sourceTable)); - outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED)); - } - - /** - * @param partitionKeys the list of partition keys of the table - * @param partSpecs the partition specs given by the user - * @return >=0 if no subpartition value is specified without a partition's - * value being specified else it returns -1 - */ - private int isPartitionValueContinuous(List<FieldSchema> partitionKeys, - Map<String, String> partSpecs) { - int counter = 0; - for (FieldSchema partitionKey : partitionKeys) { - if (partSpecs.containsKey(partitionKey.getName())) { - counter++; - continue; - } - return partSpecs.size() == counter ? counter : -1; - } - return counter; - } - private void analyzeTruncateTable(ASTNode ast) throws SemanticException { ASTNode root = (ASTNode) ast.getChild(0); // TOK_TABLE_PARTITION final String tableName = getUnescapedName((ASTNode) root.getChild(0)); @@ -549,18 +424,18 @@ private void addTruncateTableOutputs(ASTNode root, Table table, Map outputs.add(alterTableOutput); //do not need the lock for partitions since they are covered by the table lock if (isCascade) { - for (Partition part : getPartitions(tab, partSpec, false)) { + for (Partition part : PartitionUtils.getPartitions(db, tab, partSpec, false)) { outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); } } @@ -975,7 +850,7 @@ private void addInputsOutputsAlterTable(TableName tableName, Map if (isFullSpec(tab, partSpec)) { // Fully specified partition spec - Partition part = getPartition(tab, partSpec, true); + Partition part = PartitionUtils.getPartition(db, tab, partSpec, true); outputs.add(new WriteEntity(part, writeType)); } else { // Partial partition spec supplied. Make sure this is allowed.
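Stepping back, the else-if dispatch branches deleted from analyzeInternal earlier in this file are not lost: each relocated analyzer registers itself with DDLSemanticAnalyzerFactory via the @DDLType annotation visible on the new classes in this patch, and the factory picks the analyzer whose annotation matches the root token. A minimal, self-contained sketch of that registration pattern (simplified; the real factory discovers classes on the classpath and also supports category resolvers such as AlterViewAnalyzerCategory):

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class DdlDispatchDemo {
  // Stand-ins for HiveParser token constants.
  static final int TOK_SHOWPARTITIONS = 1;
  static final int TOK_ALTERTABLE_RENAMEPART = 2;

  @Retention(RetentionPolicy.RUNTIME)
  @interface DDLType { int type(); }

  interface Analyzer { void analyze(); }

  @DDLType(type = TOK_SHOWPARTITIONS)
  static class ShowPartitionsAnalyzer implements Analyzer {
    public void analyze() { System.out.println("SHOW PARTITIONS"); }
  }

  @DDLType(type = TOK_ALTERTABLE_RENAMEPART)
  static class RenamePartitionAnalyzer implements Analyzer {
    public void analyze() { System.out.println("ALTER TABLE ... RENAME PARTITION"); }
  }

  // The factory reads each class's annotation once and keys constructors by token type.
  static final Map<Integer, Supplier<Analyzer>> REGISTRY = new HashMap<>();
  static void register(Class<? extends Analyzer> cls, Supplier<Analyzer> ctor) {
    REGISTRY.put(cls.getAnnotation(DDLType.class).type(), ctor);
  }

  public static void main(String[] args) {
    register(ShowPartitionsAnalyzer.class, ShowPartitionsAnalyzer::new);
    register(RenamePartitionAnalyzer.class, RenamePartitionAnalyzer::new);
    REGISTRY.get(TOK_SHOWPARTITIONS).get().analyze(); // replaces the old else-if chain
  }
}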
@@ -986,7 +861,7 @@ private void addInputsOutputsAlterTable(TableName tableName, Map throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED); } - for (Partition part : getPartitions(tab, partSpec, true)) { + for (Partition part : PartitionUtils.getPartitions(db, tab, partSpec, true)) { outputs.add(new WriteEntity(part, writeType)); } } @@ -1067,7 +942,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, TableName tableName, M throw new SemanticException("source table " + tableName + " is partitioned but no partition desc found."); } else { - Partition part = getPartition(tblObj, partSpec, false); + Partition part = PartitionUtils.getPartition(db, tblObj, partSpec, false); if (part == null) { throw new SemanticException("source table " + tableName + " is partitioned but partition not found."); @@ -1392,7 +1267,7 @@ private void validateTable(TableName tableName, Map<String, String> partSpec) throws SemanticException { Table tab = getTable(tableName); if (partSpec != null) { - getPartition(tab, partSpec, true); + PartitionUtils.getPartition(db, tab, partSpec, true); } } @@ -1474,25 +1349,6 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { LOG.info("analyzeDescribeTable done"); } - private void analyzeShowPartitions(ASTNode ast) throws SemanticException { - ShowPartitionsDesc showPartsDesc; - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast); - // We only can have a single partition spec - assert (partSpecs.size() <= 1); - Map<String, String> partSpec = null; - if (partSpecs.size() > 0) { - partSpec = partSpecs.get(0); - } - - validateTable(HiveTableName.ofNullableWithNoDefault(tableName), null); - - showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); - inputs.add(new ReadEntity(getTable(tableName))); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showPartsDesc))); - setFetchTask(createFetchTask(ShowPartitionsDesc.SCHEMA)); - } - private void analyzeShowTables(ASTNode ast) throws SemanticException { ShowTablesDesc showTblsDesc; String dbName = SessionState.get().getCurrentDatabase(); @@ -1798,31 +1654,6 @@ private void analyzeAlterTableRename(TableName source, ASTNode ast, boolean expe rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableRenamePart(ASTNode ast, TableName tblName, - Map<String, String> oldPartSpec) throws SemanticException { - Table tab = getTable(tblName, true); - validateAlterTableType(tab, AlterTableType.RENAMEPARTITION); - Map<String, String> newPartSpec = - getValidatedPartSpec(tab, (ASTNode)ast.getChild(0), conf, false); - if (newPartSpec == null) { - throw new SemanticException("RENAME PARTITION Missing Destination" + ast); - } - ReadEntity re = new ReadEntity(tab); - re.noLockNeeded(); - inputs.add(re); - - List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>(); - partSpecs.add(oldPartSpec); - partSpecs.add(newPartSpec); - addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE); - AlterTableRenamePartitionDesc renamePartitionDesc = new AlterTableRenamePartitionDesc(tblName, oldPartSpec, - newPartSpec, null, tab); - if (AcidUtils.isTransactionalTable(tab)) { - setAcidDdlDesc(renamePartitionDesc); - } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), renamePartitionDesc))); - } - private void analyzeAlterTableBucketNum(ASTNode ast, TableName tblName, Map<String, String> partSpec) throws SemanticException { Table tab = getTable(tblName, true); @@ -1838,299 +1669,6 @@ private void 
analyzeAlterTableBucketNum(ASTNode ast, TableName tblName, Map<String, String> partSpec) - Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = - getFullPartitionSpecs(ast, tab, canGroupExprs); - if (partSpecs.isEmpty()) - { - return; // nothing to do - } - - validateAlterTableType(tab, AlterTableType.DROPPARTITION, expectView); - ReadEntity re = new ReadEntity(tab); - re.noLockNeeded(); - inputs.add(re); - - addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists); - - AlterTableDropPartitionDesc dropTblDesc = - new AlterTableDropPartitionDesc(tName, partSpecs, mustPurge, replicationSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc))); - } - - private void analyzeAlterTablePartColType(TableName tableName, ASTNode ast) - throws SemanticException { - - - // check if table exists. - Table tab = getTable(tableName); - inputs.add(new ReadEntity(tab)); - - // validate the DDL is a valid operation on the table. - validateAlterTableType(tab, AlterTableType.ALTERPARTITION, false); - - // Alter table ... partition column ( column newtype) only takes one column at a time. - // It must have a column name followed with type. - ASTNode colAst = (ASTNode) ast.getChild(0); - - FieldSchema newCol = new FieldSchema(); - - // get column name - String name = colAst.getChild(0).getText().toLowerCase(); - newCol.setName(unescapeIdentifier(name)); - - // get column type - ASTNode typeChild = (ASTNode) (colAst.getChild(1)); - newCol.setType(getTypeStringFromAST(typeChild)); - - if (colAst.getChildCount() == 3) { - newCol.setComment(unescapeSQLString(colAst.getChild(2).getText())); - } - - // check if column is defined or not - boolean fFoundColumn = false; - for( FieldSchema col : tab.getTTable().getPartitionKeys()) { - if (col.getName().compareTo(newCol.getName()) == 0) { - fFoundColumn = true; - } - } - - // raise error if we could not find the column - if (!fFoundColumn) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName())); - } - - AlterTableAlterPartitionDesc alterTblAlterPartDesc = - new AlterTableAlterPartitionDesc(tableName.getDbTable(), newCol); - if (AcidUtils.isTransactionalTable(tab)) { - setAcidDdlDesc(alterTblAlterPartDesc); - } - - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblAlterPartDesc))); - } - - /** - * Add one or more partitions to a table. Useful when the data has been copied - * to the right location by some other process. - * - * @param ast - * The parsed command tree. - * - * @param expectView - * True for ALTER VIEW, false for ALTER TABLE. - * - * @throws SemanticException - * Parsing failed - */ - private void analyzeAlterTableAddParts(TableName tName, CommonTree ast, boolean expectView) throws SemanticException { - - // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+) - boolean ifNotExists = ast.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS; - - Table table = getTable(tName); - boolean isView = table.isView(); - validateAlterTableType(table, AlterTableType.ADDPARTITION, expectView); - outputs.add(new WriteEntity(table, - /*use DDL_EXCLUSIVE to cause X lock to prevent races between concurrent add partition calls - with IF NOT EXISTS. w/o this 2 concurrent calls to add the same partition may both add - data since for transactional tables creating partition metadata and moving data there are - 2 separate actions. */ - ifNotExists && AcidUtils.isTransactionalTable(table) ? WriteType.DDL_EXCLUSIVE - : WriteEntity.WriteType.DDL_SHARED)); - - int numCh = ast.getChildCount(); - int start = ifNotExists ? 
1 : 0; - - String currentLocation = null; - Map currentPart = null; - // Parser has done some verification, so the order of tokens doesn't need to be verified here. - - List partitions = new ArrayList<>(); - for (int num = start; num < numCh; num++) { - ASTNode child = (ASTNode) ast.getChild(num); - switch (child.getToken().getType()) { - case HiveParser.TOK_PARTSPEC: - if (currentPart != null) { - partitions.add(createPartitionDesc(table, currentLocation, currentPart)); - currentLocation = null; - } - currentPart = getValidatedPartSpec(table, child, conf, true); - validatePartitionValues(currentPart); // validate reserved values - break; - case HiveParser.TOK_PARTITIONLOCATION: - // if location specified, set in partition - if (isView) { - throw new SemanticException("LOCATION clause illegal for view partition"); - } - currentLocation = unescapeSQLString(child.getChild(0).getText()); - inputs.add(toReadEntity(currentLocation)); - break; - default: - throw new SemanticException("Unknown child: " + child); - } - } - - // add the last one - if (currentPart != null) { - partitions.add(createPartitionDesc(table, currentLocation, currentPart)); - } - - if (partitions.isEmpty()) { - // nothing to do - return; - } - - AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(table.getDbName(), - table.getTableName(), ifNotExists, partitions); - - Task ddlTask = - TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc)); - rootTasks.add(ddlTask); - handleTransactionalTable(table, addPartitionDesc, ddlTask); - - if (isView) { - // Compile internal query to capture underlying table partition dependencies - StringBuilder cmd = new StringBuilder(); - cmd.append("SELECT * FROM "); - cmd.append(HiveUtils.unparseIdentifier(tName.getDb())); - cmd.append("."); - cmd.append(HiveUtils.unparseIdentifier(tName.getTable())); - cmd.append(" WHERE "); - boolean firstOr = true; - for (AlterTableAddPartitionDesc.PartitionDesc partitionDesc : partitions) { - if (firstOr) { - firstOr = false; - } else { - cmd.append(" OR "); - } - boolean firstAnd = true; - cmd.append("("); - for (Map.Entry entry : partitionDesc.getPartSpec().entrySet()) { - if (firstAnd) { - firstAnd = false; - } else { - cmd.append(" AND "); - } - cmd.append(HiveUtils.unparseIdentifier(entry.getKey())); - cmd.append(" = '"); - cmd.append(HiveUtils.escapeString(entry.getValue())); - cmd.append("'"); - } - cmd.append(")"); - } - // FIXME: is it ok to have a completely new querystate? - QueryState newQueryState = QueryState.getNewQueryState(conf, queryState.getLineageState()); - // FIXME: this driver instance is never closed - Driver driver = new Driver(newQueryState); - int rc = driver.compile(cmd.toString(), false); - if (rc != 0) { - throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg()); - } - inputs.addAll(driver.getPlan().getInputs()); - } - } - - private AlterTableAddPartitionDesc.PartitionDesc createPartitionDesc(Table table, String currentLocation, - Map currentPart) { - Map params = null; - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER) && currentLocation == null) { - params = new HashMap(); - StatsSetupConst.setStatsStateForCreateTable(params, - MetaStoreUtils.getColumnNames(table.getCols()), StatsSetupConst.TRUE); - } - return new AlterTableAddPartitionDesc.PartitionDesc(currentPart, currentLocation, params); - } - - /** - * Add partition for Transactional tables needs to add (copy/rename) the data so that it lands - * in a delta_x_x/ folder in the partition dir. 
- */ - private void handleTransactionalTable(Table tab, AlterTableAddPartitionDesc addPartitionDesc, - Task<?> ddlTask) throws SemanticException { - if(!AcidUtils.isTransactionalTable(tab)) { - return; - } - Long writeId = null; - int stmtId = 0; - - for (AlterTableAddPartitionDesc.PartitionDesc partitonDesc : addPartitionDesc.getPartitions()) { - if (partitonDesc.getLocation() != null) { - AcidUtils.validateAcidPartitionLocation(partitonDesc.getLocation(), conf); - if(addPartitionDesc.isIfNotExists()) { - //Don't add partition data if it already exists - Partition oldPart = getPartition(tab, partitonDesc.getPartSpec(), false); - if(oldPart != null) { - continue; - } - } - if(writeId == null) { - //so that we only allocate a writeId only if actually adding data - // (vs. adding a partition w/o data) - try { - writeId = getTxnMgr().getTableWriteId(tab.getDbName(), - tab.getTableName()); - } catch (LockException ex) { - throw new SemanticException("Failed to allocate the write id", ex); - } - stmtId = getTxnMgr().getStmtIdAndIncrement(); - } - LoadTableDesc loadTableWork = new LoadTableDesc(new Path(partitonDesc.getLocation()), - Utilities.getTableDesc(tab), partitonDesc.getPartSpec(), - LoadTableDesc.LoadFileType.KEEP_EXISTING, //not relevant - creating new partition - writeId); - loadTableWork.setStmtId(stmtId); - loadTableWork.setInheritTableSpecs(true); - try { - partitonDesc.setLocation(new Path(tab.getDataLocation(), - Warehouse.makePartPath(partitonDesc.getPartSpec())).toString()); - } - catch (MetaException ex) { - throw new SemanticException("Could not determine partition path due to: " - + ex.getMessage(), ex); - } - Task<?> moveTask = TaskFactory.get( - new MoveWork(getInputs(), getOutputs(), loadTableWork, null, - true,//make sure to check format - false));//is this right? - ddlTask.addDependentTask(moveTask); - } - } - } /** * Rewrite the metadata for one or more partitions in a table. Useful when * an external process modifies files on HDFS and you want the pre/post @@ -2155,7 +1693,7 @@ private void analyzeAlterTableTouch(TableName tName, CommonTree ast) throws Sema outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc))); } else { - addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK); + PartitionUtils.addTablePartsOutputs(db, outputs, tab, partSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK); for (Map<String, String> partSpec : partSpecs) { AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(tName.getDbTable(), partSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc))); @@ -2173,7 +1711,7 @@ private void analyzeAlterTableArchive(TableName tName, CommonTree ast, boolean i // partition name to value List<Map<String, String>> partSpecs = getPartitionSpecs(tab, ast); - addTablePartsOutputs(tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK); + PartitionUtils.addTablePartsOutputs(db, outputs, tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK); validateAlterTableType(tab, AlterTableType.ARCHIVE); inputs.add(new ReadEntity(tab)); @@ -2201,257 +1739,6 @@ private void analyzeAlterTableArchive(TableName tName, CommonTree ast, boolean i rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), archiveDesc))); } - /** - * Get the partition specs from the tree. This stores the full specification - * with the comparator operator into the output list. - * - * @param ast Tree to extract partitions from. - * @param tab Table. 
 
   /**
    * Rewrite the metadata for one or more partitions in a table. Useful when
    * an external process modifies files on HDFS and you want the pre/post
@@ -2155,7 +1693,7 @@ private void analyzeAlterTableTouch(TableName tName, CommonTree ast) throws Sema
       outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
     } else {
-      addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK);
+      PartitionUtils.addTablePartsOutputs(db, outputs, tab, partSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK);
       for (Map<String, String> partSpec : partSpecs) {
         AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(tName.getDbTable(), partSpec);
         rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
@@ -2173,7 +1711,7 @@ private void analyzeAlterTableArchive(TableName tName, CommonTree ast, boolean i
     // partition name to value
     List<Map<String, String>> partSpecs = getPartitionSpecs(tab, ast);
 
-    addTablePartsOutputs(tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK);
+    PartitionUtils.addTablePartsOutputs(db, outputs, tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK);
     validateAlterTableType(tab, AlterTableType.ARCHIVE);
     inputs.add(new ReadEntity(tab));
@@ -2201,257 +1739,6 @@ private void analyzeAlterTableArchive(TableName tName, CommonTree ast, boolean i
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), archiveDesc)));
   }
 
-  /**
-   * Get the partition specs from the tree. This stores the full specification
-   * with the comparator operator into the output list.
-   *
-   * @param ast Tree to extract partitions from.
-   * @param tab Table.
-   * @return Map of partitions by prefix length. Most of the time the prefix length will
-   *         be the same for all partition specs, so we can just OR the expressions.
-   */
-  private Map<Integer, List<ExprNodeGenericFuncDesc>> getFullPartitionSpecs(
-      CommonTree ast, Table tab, boolean canGroupExprs) throws SemanticException {
-    String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);
-    Map<String, String> colTypes = new HashMap<String, String>();
-    for (FieldSchema fs : tab.getPartitionKeys()) {
-      colTypes.put(fs.getName().toLowerCase(), fs.getType());
-    }
-
-    Map<Integer, List<ExprNodeGenericFuncDesc>> result =
-        new HashMap<Integer, List<ExprNodeGenericFuncDesc>>();
-    for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
-      Tree partSpecTree = ast.getChild(childIndex);
-      if (partSpecTree.getType() != HiveParser.TOK_PARTSPEC) {
-        continue;
-      }
-      ExprNodeGenericFuncDesc expr = null;
-      HashSet<String> names = new HashSet<String>(partSpecTree.getChildCount());
-      for (int i = 0; i < partSpecTree.getChildCount(); ++i) {
-        CommonTree partSpecSingleKey = (CommonTree) partSpecTree.getChild(i);
-        assert (partSpecSingleKey.getType() == HiveParser.TOK_PARTVAL);
-        String key = stripIdentifierQuotes(partSpecSingleKey.getChild(0).getText()).toLowerCase();
-        String operator = partSpecSingleKey.getChild(1).getText();
-        ASTNode partValNode = (ASTNode) partSpecSingleKey.getChild(2);
-        TypeCheckCtx typeCheckCtx = new TypeCheckCtx(null);
-        ExprNodeConstantDesc valExpr = (ExprNodeConstantDesc) TypeCheckProcFactory
-            .genExprNode(partValNode, typeCheckCtx).get(partValNode);
-        Object val = valExpr.getValue();
-
-        boolean isDefaultPartitionName = val.equals(defaultPartitionName);
-
-        String type = colTypes.get(key);
-        if (type == null) {
-          throw new SemanticException("Column " + key + " not found");
-        }
-        PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type);
-        // Create the corresponding hive expression to filter on partition columns.
-        if (!isDefaultPartitionName) {
-          if (!valExpr.getTypeString().equals(type)) {
-            Converter converter = ObjectInspectorConverters.getConverter(
-                TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(valExpr.getTypeInfo()),
-                TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(pti));
-            val = converter.convert(valExpr.getValue());
-          }
-        }
-
-        ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true);
-        ExprNodeGenericFuncDesc op;
-        if (!isDefaultPartitionName) {
-          op = makeBinaryPredicate(operator, column, new ExprNodeConstantDesc(pti, val));
-        } else {
-          GenericUDF originalOp = FunctionRegistry.getFunctionInfo(operator).getGenericUDF();
-          String fnName;
-          if (FunctionRegistry.isEq(originalOp)) {
-            fnName = "isnull";
-          } else if (FunctionRegistry.isNeq(originalOp)) {
-            fnName = "isnotnull";
-          } else {
-            throw new SemanticException("Cannot use " + operator
-                + " in a default partition spec; only '=' and '!=' are allowed.");
-          }
-          op = makeUnaryPredicate(fnName, column);
-        }
-        // If it's a multi-expr filter (e.g. a='5', b='2012-01-02'), AND with previous exprs.
-        expr = (expr == null) ? op : makeBinaryPredicate("and", expr, op);
-        names.add(key);
-      }
-      if (expr == null) {
-        continue;
-      }
-      // We got the expr for one full partition spec. Determine the prefix length.
-      int prefixLength = calculatePartPrefix(tab, names);
-      List<ExprNodeGenericFuncDesc> orExpr = result.get(prefixLength);
-      // We have to tell apart partitions resulting from specs with different prefix lengths.
-      // So, if we already have something for the same prefix length, we can OR the two.
-      // If we don't, create a new separate filter. In most cases there will only be one.
-      if (orExpr == null) {
-        result.put(prefixLength, Lists.newArrayList(expr));
-      } else if (canGroupExprs) {
-        orExpr.set(0, makeBinaryPredicate("or", expr, orExpr.get(0)));
-      } else {
-        orExpr.add(expr);
-      }
-    }
-    return result;
-  }
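
To make the result shape concrete, a minimal sketch assuming a table partitioned by (ds string, hr string) and the hypothetical clause PARTITION (ds='2024-01-01', hr>'01'):

    // One TOK_PARTSPEC yields one boolean expression tree:
    //   ((ds = '2024-01-01') and (hr > '01'))
    // Both partition keys appear in the spec, so calculatePartPrefix returns 2 and the
    // expression is stored under result.get(2). A later spec with the same prefix length
    // is OR-ed into the first entry when canGroupExprs is true, or appended as a
    // separate filter otherwise.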
-
-  public static ExprNodeGenericFuncDesc makeBinaryPredicate(
-      String fn, ExprNodeDesc left, ExprNodeDesc right) throws SemanticException {
-    return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
-        FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(left, right));
-  }
-
-  public static ExprNodeGenericFuncDesc makeUnaryPredicate(
-      String fn, ExprNodeDesc arg) throws SemanticException {
-    return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
-        FunctionRegistry.getFunctionInfo(fn).getGenericUDF(), Lists.newArrayList(arg));
-  }
-
-  /**
-   * Calculates the partition prefix length based on the drop spec.
-   * This is used to avoid deleting archived partitions at a lower level.
-   * For example, if, for key columns A and B, the drop spec is A=5, B=6, we shouldn't drop
-   * archived A=5/, because it can contain B-s other than 6.
-   * @param tbl Table
-   * @param partSpecKeys Keys present in drop partition spec.
-   */
-  private int calculatePartPrefix(Table tbl, HashSet<String> partSpecKeys) {
-    int partPrefixToDrop = 0;
-    for (FieldSchema fs : tbl.getPartCols()) {
-      if (!partSpecKeys.contains(fs.getName())) {
-        break;
-      }
-      ++partPrefixToDrop;
-    }
-    return partPrefixToDrop;
-  }
-
-  /**
-   * Certain partition values are used by hive, e.g. the default partition
-   * in dynamic partitioning and the intermediate partition values used in the
-   * archiving process. Naturally, prohibit the user from creating partitions
-   * with these reserved values. The check this function performs is more
-   * restrictive than the actual limitation, but it's simpler. Should be okay
-   * since the reserved names are fairly long and uncommon.
-   */
-  private void validatePartitionValues(Map<String, String> partSpec)
-      throws SemanticException {
-    for (Entry<String, String> e : partSpec.entrySet()) {
-      for (String s : reservedPartitionValues) {
-        String value = e.getValue();
-        if (value != null && value.contains(s)) {
-          throw new SemanticException(ErrorMsg.RESERVED_PART_VAL.getMsg(
-              "(User value: " + e.getValue() + " Reserved substring: " + s + ")"));
-        }
-      }
-    }
-  }
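
To make the reserved-value check concrete, under stock HiveConf defaults (the literal names below are the configuration defaults, not something introduced by this patch):

    // hive.exec.default.partition.name defaults to "__HIVE_DEFAULT_PARTITION__", so
    //   ALTER TABLE t ADD PARTITION (ds='__HIVE_DEFAULT_PARTITION__')
    // is rejected with RESERVED_PART_VAL. The archiving intermediates (e.g. an
    // "_INTERMEDIATE_ORIGINAL" suffix under default settings) are screened out by
    // the same substring test.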
-
-  /**
-   * Add the table partitions to be modified in the output, so that it is available for the
-   * pre-execution hook. If the partition does not exist, no error is thrown.
-   */
-  private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
-      WriteEntity.WriteType writeType) throws SemanticException {
-    addTablePartsOutputs(table, partSpecs, false, false, null, writeType);
-  }
-
-  /**
-   * Add the table partitions to be modified in the output, so that it is available for the
-   * pre-execution hook. If the partition does not exist, no error is thrown.
-   */
-  private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
-      boolean allowMany, WriteEntity.WriteType writeType) throws SemanticException {
-    addTablePartsOutputs(table, partSpecs, false, allowMany, null, writeType);
-  }
-
-  /**
-   * Add the table partitions to be modified in the output, so that it is available for the
-   * pre-execution hook. If the partition does not exist, throw an error if
-   * throwIfNonExistent is true, otherwise ignore it.
-   */
-  private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
-      boolean throwIfNonExistent, boolean allowMany, ASTNode ast, WriteEntity.WriteType writeType)
-      throws SemanticException {
-    Iterator<Map<String, String>> i;
-    int index;
-    for (i = partSpecs.iterator(), index = 1; i.hasNext(); ++index) {
-      Map<String, String> partSpec = i.next();
-      List<Partition> parts = null;
-      if (allowMany) {
-        try {
-          parts = db.getPartitions(table, partSpec);
-        } catch (HiveException e) {
-          LOG.error("Got HiveException during obtaining list of partitions"
-              + StringUtils.stringifyException(e));
-          throw new SemanticException(e.getMessage(), e);
-        }
-      } else {
-        parts = new ArrayList<Partition>();
-        try {
-          Partition p = db.getPartition(table, partSpec, false);
-          if (p != null) {
-            parts.add(p);
-          }
-        } catch (HiveException e) {
-          LOG.debug("Wrong specification" + StringUtils.stringifyException(e));
-          throw new SemanticException(e.getMessage(), e);
-        }
-      }
-      if (parts.isEmpty() && throwIfNonExistent) {
-        throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(ast.getChild(index)));
-      }
-      for (Partition p : parts) {
-        // Don't request any locks here, as the table has already been locked.
-        outputs.add(new WriteEntity(p, writeType));
-      }
-    }
-  }
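
Earlier hunks in this diff replace calls to these private overloads with a static helper that takes the Hive client and the output set explicitly, for example:

    PartitionUtils.addTablePartsOutputs(db, outputs, tab, partSpecs, false,
        WriteEntity.WriteType.DDL_NO_LOCK);

Judging from the touch and archive call sites, the single boolean corresponds to the old allowMany flag; that mapping is inferred from the call sites in this patch rather than stated anywhere.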
-
-  /**
-   * Add the table partitions to be modified in the output, so that it is available for the
-   * pre-execution hook. If the partition does not exist, throw an error if
-   * throwIfNonExistent is true, otherwise ignore it.
-   */
-  private void addTableDropPartsOutputs(Table tab,
-      Collection<List<ExprNodeGenericFuncDesc>> partSpecs,
-      boolean throwIfNonExistent) throws SemanticException {
-    for (List<ExprNodeGenericFuncDesc> specs : partSpecs) {
-      for (ExprNodeGenericFuncDesc partSpec : specs) {
-        List<Partition> parts = new ArrayList<Partition>();
-        boolean hasUnknown = false;
-        try {
-          hasUnknown = db.getPartitionsByExpr(tab, partSpec, conf, parts);
-        } catch (Exception e) {
-          throw new SemanticException(
-              ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()), e);
-        }
-        if (hasUnknown) {
-          throw new SemanticException(
-              "Unexpected unknown partitions for " + partSpec.getExprString());
-        }
-
-        // TODO: ifExists could be moved to metastore. In fact it already supports that. Check it
-        //       for now since we get parts for output anyway, so we can get the error message
-        //       earlier... If we get rid of output, we can get rid of this.
-        if (parts.isEmpty() && throwIfNonExistent) {
-          throw new SemanticException(
-              ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()));
-        }
-        for (Partition p : parts) {
-          outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_EXCLUSIVE));
-        }
-      }
-    }
-  }
-
   /**
    * Analyze alter table's skewed table
    *
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index eefe2ae143..e9324ee4f3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
 import org.apache.hadoop.hive.ql.ddl.table.drop.DropTableDesc;
-import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc;
+import org.apache.hadoop.hive.ql.ddl.table.partition.add.AlterTableAddPartitionDesc;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 137f721042..8d1136a42b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -93,8 +93,6 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t
     switch (child.getType()) {
     case HiveParser.TOK_ALTERVIEW_PROPERTIES:
     case HiveParser.TOK_ALTERVIEW_DROPPROPERTIES:
-    case HiveParser.TOK_ALTERVIEW_ADDPARTS:
-    case HiveParser.TOK_ALTERVIEW_DROPPARTS:
     case HiveParser.TOK_ALTERVIEW_RENAME:
       opType = HiveOperation.operationForToken(child.getType());
       queryState.setCommandType(opType);
@@ -110,7 +108,6 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t
     case HiveParser.TOK_SHOWTABLES:
     case HiveParser.TOK_SHOW_TABLESTATUS:
     case HiveParser.TOK_SHOW_TBLPROPERTIES:
-    case HiveParser.TOK_SHOWPARTITIONS:
     case HiveParser.TOK_SHOWLOCKS:
     case HiveParser.TOK_SHOWDBLOCKS:
    case HiveParser.TOK_SHOWCONF:
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
index e650f52aa2..066549d9cd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
@@ -19,7 +19,7 @@
 import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
-import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc;
+import org.apache.hadoop.hive.ql.ddl.table.partition.drop.AlterTableDropPartitionDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
index c936840d31..57f3043dac 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.ql.ddl.DDLWork;
-import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableRenamePartitionDesc;
+import org.apache.hadoop.hive.ql.ddl.table.partition.rename.AlterTableRenamePartitionDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;