diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 6b43b5333d..abd351d51a 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hive.ql.ddl.table.info.DescTableDesc; import org.apache.hadoop.hive.ql.ddl.table.info.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablesDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -41,9 +43,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.ErrorType; @@ -314,20 +314,10 @@ protected void authorizeDDLWork2(HiveSemanticAnalyzerHookContext cntxt, Hive hiv String dbName = showTableStatus.getDbName() == null ? SessionState.get().getCurrentDatabase() : showTableStatus.getDbName(); authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); - } - } - - @Override - protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) - throws HiveException { - // TODO: add alter database support in HCat - - // Table operations. - - DropPartitionDesc dropPartition = work.getDropPartitionDesc(); - if (dropPartition != null) { + } else if (ddlDesc instanceof AlterTableDropPartitionDesc) { + AlterTableDropPartitionDesc dropPartition = (AlterTableDropPartitionDesc)ddlDesc; //this is actually a ALTER TABLE DROP PARITITION statement - for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()) { + for (AlterTableDropPartitionDesc.PartitionDesc partSpec : dropPartition.getPartSpecs()) { // partitions are not added as write entries in drop partitions in Hive Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropPartition.getTableName()); List partitions = null; @@ -340,8 +330,19 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive authorize(part, Privilege.DROP); } } + } else if (ddlDesc instanceof ShowPartitionsDesc) { + ShowPartitionsDesc showParts = (ShowPartitionsDesc)ddlDesc; + String tableName = extractTableName(showParts.getTabName()); + authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); } + } + + @Override + protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) + throws HiveException { + // TODO: add alter database support in HCat + // Table operations. 
AlterTableDesc alterTable = work.getAlterTblDesc(); if (alterTable != null) { Table table = hive.getTable(SessionState.get().getCurrentDatabase(), @@ -372,11 +373,5 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive } //other alter operations are already supported by Hive } - - ShowPartitionsDesc showParts = work.getShowPartsDesc(); - if (showParts != null) { - String tableName = extractTableName(showParts.getTabName()); - authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); - } } } diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 66a1737839..fee7ffc4c7 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.cli.CliSessionState; @@ -467,7 +467,7 @@ private static void createPartitionIfNotExists(HiveEndPoint ep, Map partSpec = Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals); - AddPartitionDesc addPartitionDesc = new AddPartitionDesc(ep.database, ep.table, true); + AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(ep.database, ep.table, true); String partLocation = new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString(); addPartitionDesc.addPartition(partSpec, partLocation); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 3820fabbf9..54de320aaa 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -59,7 +59,8 @@ import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.exec.MoveTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -378,11 +379,9 @@ public boolean validate(Task task) { private boolean hasPartitionTask(Task rootTask) { checkTaskPresent validator = new checkTaskPresent() { public boolean validate(Task task) { - if (task instanceof DDLTask) { - DDLTask ddlTask = (DDLTask)task; - if (ddlTask.getWork().getAddPartitionDesc() != null) { - return true; - } + if (task instanceof DDLTask2) { + DDLTask2 ddlTask = (DDLTask2)task; + return ddlTask.getWork().getDDLDesc() instanceof AlterTableAddPartitionDesc; } return false; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java new file mode 100644 index 0000000000..3c6d7eada9 --- /dev/null +++ 
ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableUtils.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +/** + * Utilities used by some ALTER TABLE commands. + */ +public final class AlterTableUtils { + private AlterTableUtils() { + throw new UnsupportedOperationException("AlterTableUtils should not be instantiated"); + } + + /** + * Validate if the given table/partition is eligible for update. + */ + public static boolean allowOperationInReplicationScope(Hive db, String tableName, Map partSpec, + ReplicationSpec replicationSpec) throws HiveException { + if ((null == replicationSpec) || (!replicationSpec.isInReplicationScope())) { + // Always allow the operation if it is not in replication scope. + return true; + } + + // If the table/partition exist and is older than the event, then just apply the event else noop. + Table existingTable = db.getTable(tableName, false); + if ((existingTable != null) && replicationSpec.allowEventReplacementInto(existingTable.getParameters())) { + // Table exists and is older than the update. Now, need to ensure if update allowed on the partition. + if (partSpec != null) { + Partition existingPtn = db.getPartition(existingTable, partSpec, false); + return ((existingPtn != null) && replicationSpec.allowEventReplacementInto(existingPtn.getParameters())); + } + + // Replacement is allowed as the existing table is older than event + return true; + } + + // The table is missing either due to drop/rename which follows the operation. + // Or the existing table is newer than our update. So, don't allow the update. + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java similarity index 87% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java index 8ea857e956..08a2ffbaf9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionDesc.java @@ -15,35 +15,42 @@ * See the License for the specific language governing permissions and * limitations under the License. 
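For context, the new AlterTableUtils.allowOperationInReplicationScope helper above is the guard that replication-aware ALTER TABLE operations call before applying an event; the rename-partition operation later in this patch uses it exactly this way. A minimal sketch of the calling pattern (illustrative only, not part of the patch; placeholder names, a live Hive client, and a ReplicationSpec from the incoming event are assumed):

    // Inside a hypothetical operation's execute(); db, conf and replicationSpec come from the context.
    Map<String, String> partSpec = new LinkedHashMap<>();
    partSpec.put("ds", "2019-06-01");                      // placeholder partition spec
    if (!AlterTableUtils.allowOperationInReplicationScope(db, "default.web_logs", partSpec, replicationSpec)) {
      return 0;  // target table/partition is missing or newer than the event, so the event is a no-op
    }
    // ... otherwise apply the ALTER TABLE change carried by this replication event.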
*/ -package org.apache.hadoop.hive.ql.plan; + +package org.apache.hadoop.hive.ql.ddl.table.partition; import java.io.Serializable; import java.util.ArrayList; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * Contains the information needed to add one or more partitions. + * DDL task description for ALTER TABLE ... ADD PARTITION ... commands. */ -public class AddPartitionDesc extends DDLDesc implements Serializable { - +@Explain(displayName = "Add Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableAddPartitionDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; - public static class OnePartitionDesc { - public OnePartitionDesc() {} + static { + DDLTask2.registerOperation(AlterTableAddPartitionDesc.class, AlterTableAddPartitionOperation.class); + } - OnePartitionDesc( + public static class PartitionDesc { + PartitionDesc( Map partSpec, String location, Map params) { this(partSpec, location); this.partParams = params; } - OnePartitionDesc(Map partSpec, String location) { + PartitionDesc(Map partSpec, String location) { this.partSpec = partSpec; this.location = location; } @@ -158,12 +165,10 @@ public void setOutputFormat(String outputFormat) { public void setWriteId(long writeId) { this.writeId = writeId; } } - private static final long serialVersionUID = 1L; - String tableName; String dbName; boolean ifNotExists; - List partitions = null; + List partitions = null; boolean replaceMode = false; private ReplicationSpec replicationSpec = null; @@ -171,10 +176,10 @@ public void setOutputFormat(String outputFormat) { /** * For serialization only. */ - public AddPartitionDesc() { + public AlterTableAddPartitionDesc() { } - public AddPartitionDesc( + public AlterTableAddPartitionDesc( String dbName, String tableName, boolean ifNotExists) { super(); this.dbName = dbName; @@ -196,7 +201,7 @@ public AddPartitionDesc( * partition parameters. 
*/ @Deprecated - public AddPartitionDesc(String dbName, String tableName, + public AlterTableAddPartitionDesc(String dbName, String tableName, Map partSpec, String location, Map params) { super(); this.dbName = dbName; @@ -212,9 +217,9 @@ public void addPartition(Map partSpec, String location) { private void addPartition( Map partSpec, String location, Map params) { if (this.partitions == null) { - this.partitions = new ArrayList(); + this.partitions = new ArrayList(); } - this.partitions.add(new OnePartitionDesc(partSpec, location, params)); + this.partitions.add(new PartitionDesc(partSpec, location, params)); } /** @@ -255,7 +260,7 @@ public String getLocationForExplain() { if (this.partitions == null || this.partitions.isEmpty()) return ""; boolean isFirst = true; StringBuilder sb = new StringBuilder(); - for (OnePartitionDesc desc : this.partitions) { + for (PartitionDesc desc : this.partitions) { if (!isFirst) { sb.append(", "); } @@ -270,7 +275,7 @@ public String getPartSpecStringForExplain() { if (this.partitions == null || this.partitions.isEmpty()) return ""; boolean isFirst = true; StringBuilder sb = new StringBuilder(); - for (OnePartitionDesc desc : this.partitions) { + for (PartitionDesc desc : this.partitions) { if (!isFirst) { sb.append(", "); } @@ -299,7 +304,7 @@ public int getPartitionCount() { return this.partitions.size(); } - public OnePartitionDesc getPartition(int i) { + public PartitionDesc getPartition(int i) { return this.partitions.get(i); } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionOperation.java new file mode 100644 index 0000000000..19eb7901d2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAddPartitionOperation.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import java.util.List; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; + +/** + * Operation process of adding a partition to a table. 
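To make the add-partition flow concrete, this is roughly how a caller builds the new descriptor and hands it to the metastore client, mirroring the HiveEndPoint change earlier in this patch; the database, table and location values here are placeholders:

    Map<String, String> partSpec = new LinkedHashMap<>();
    partSpec.put("ds", "2019-06-01");

    AlterTableAddPartitionDesc addPartitionDesc =
        new AlterTableAddPartitionDesc("default", "web_logs", true);    // true = IF NOT EXISTS
    addPartitionDesc.addPartition(partSpec, "/warehouse/web_logs/ds=2019-06-01");

    Hive db = Hive.get(conf);                                           // conf: a HiveConf for the metastore
    List<Partition> created = db.createPartitions(addPartitionDesc);    // same call the operation below makes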
+ */ +public class AlterTableAddPartitionOperation extends DDLOperation { + private final AlterTableAddPartitionDesc desc; + + public AlterTableAddPartitionOperation(DDLOperationContext context, AlterTableAddPartitionDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + List parts = context.getDb().createPartitions(desc); + for (Partition part : parts) { + DDLUtils.addIfAbsentByName(new WriteEntity(part, WriteEntity.WriteType.INSERT), context); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionDesc.java new file mode 100644 index 0000000000..33b0205f07 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionDesc.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... PARTITION COLUMN ... commands. 
+ */ +@Explain(displayName = "Alter Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableAlterPartitionDesc implements DDLDesc, DDLDescWithWriteId { + public static final long serialVersionUID = 1; + + static { + DDLTask2.registerOperation(AlterTableAlterPartitionDesc.class, AlterTableAlterPartitionOperation.class); + } + + private final String fqTableName; + private final FieldSchema partKeySpec; + + public AlterTableAlterPartitionDesc(String fqTableName, FieldSchema partKeySpec) { + this.fqTableName = fqTableName; + this.partKeySpec = partKeySpec; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return fqTableName; + } + + public FieldSchema getPartKeySpec() { + return partKeySpec; + } + + @Explain(displayName = "partition key name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPartKeyName() { + return partKeySpec.getName(); + } + + @Explain(displayName = "partition key type", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPartKeyType() { + return partKeySpec.getType(); + } + + @Override + public void setWriteId(long writeId) { + // We don't actually need the write id, but by implementing DDLDescWithWriteId it ensures that it is allocated + } + + @Override + public String getFullTableName() { + return fqTableName; + } + + @Override + public boolean mayNeedWriteId() { + return true; // Checked before setting as the acid desc. + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionOperation.java new file mode 100644 index 0000000000..57cba22695 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableAlterPartitionOperation.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
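Each new descriptor registers its operation class with DDLTask2 in a static initializer, as seen above. DDLTask2 itself is not part of this diff, so the following is only a rough, self-contained sketch of the registry idea those calls imply; the class and member names in it are illustrative, not Hive's actual implementation:

    import java.lang.reflect.Constructor;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class DdlOperationRegistrySketch {
      // hypothetical registry: descriptor class -> operation class
      private static final Map<Class<?>, Class<?>> OPERATIONS = new ConcurrentHashMap<>();

      static void registerOperation(Class<?> descClass, Class<?> operationClass) {
        OPERATIONS.put(descClass, operationClass);
      }

      // On execute(), the task would look up the operation by the descriptor's runtime type and
      // instantiate it with (context, desc), matching the operation constructors in this patch.
      static Object newOperation(Object context, Object desc) throws ReflectiveOperationException {
        Class<?> operationClass = OPERATIONS.get(desc.getClass());
        Constructor<?> constructor = operationClass.getDeclaredConstructors()[0];
        return constructor.newInstance(context, desc);
      }
    }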
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; + +/** + * Operation process of altering a partition to a table. + */ +public class AlterTableAlterPartitionOperation extends DDLOperation { + private final AlterTableAlterPartitionDesc desc; + + public AlterTableAlterPartitionOperation(DDLOperationContext context, AlterTableAlterPartitionDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Table tbl = context.getDb().getTable(desc.getTableName(), true); + + check(tbl); + setNewPartitionKeys(tbl); + alterTable(tbl); + + return 0; + } + + private void check(Table tbl) throws HiveException { + assert(tbl.isPartitioned()); + try { + int colIndex = getColumnIndex(tbl); + checkPartitionValues(tbl, colIndex); + } catch(Exception e) { + throw new HiveException("Exception while checking type conversion of existing partition values to " + + desc.getPartKeySpec() + " : " + e.getMessage()); + } + } + + private int getColumnIndex(Table tbl) throws HiveException { + int colIndex = -1; + for (FieldSchema col : tbl.getTTable().getPartitionKeys()) { + colIndex++; + if (col.getName().compareTo(desc.getPartKeyName()) == 0) { + return colIndex; + } + } + + throw new HiveException("Cannot find partition column " + desc.getPartKeyName()); + } + + /** + * Check if the existing partition values can be type casted to the new column type + * with a non null value before trying to alter the partition column type. 
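The check described above can be reproduced in isolation: partition values are stored as strings, so each existing value is converted to the prospective column type and the ALTER is refused if any conversion yields null. A self-contained sketch using the same serde2 calls as the method below ("int" and the sample values are placeholders):

    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class PartitionValueConversionSketch {
      public static void main(String[] args) {
        TypeInfo newType = TypeInfoUtils.getTypeInfoFromTypeString("int");
        ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(newType);
        Converter converter = ObjectInspectorConverters.getConverter(
            PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);

        System.out.println(converter.convert("2019"));  // a writable int: the value survives the type change
        System.out.println(converter.convert("abc"));   // null: the partition column change would be rejected
      }
    }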
+ */ + private void checkPartitionValues(Table tbl, int colIndex) throws HiveException { + TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(desc.getPartKeyType()); + ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType); + Converter converter = ObjectInspectorConverters.getConverter( + PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); + + Set partitions = context.getDb().getAllPartitionsOf(tbl); + for (Partition part : partitions) { + if (part.getName().equals(context.getConf().getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) { + continue; + } + + try { + String value = part.getValues().get(colIndex); + Object convertedValue = converter.convert(value); + if (convertedValue == null) { + throw new HiveException(" Converting from " + TypeInfoFactory.stringTypeInfo + " to " + expectedType + + " for value : " + value + " resulted in NULL object"); + } + } catch (Exception e) { + throw new HiveException("Exception while converting " + TypeInfoFactory.stringTypeInfo + " to " + + expectedType + " for value : " + part.getValues().get(colIndex)); + } + } + } + + private void setNewPartitionKeys(Table tbl) { + List newPartitionKeys = new ArrayList(); + for (FieldSchema col : tbl.getTTable().getPartitionKeys()) { + if (col.getName().compareTo(desc.getPartKeyName()) == 0) { + newPartitionKeys.add(desc.getPartKeySpec()); + } else { + newPartitionKeys.add(col); + } + } + + tbl.getTTable().setPartitionKeys(newPartitionKeys); + } + + private void alterTable(Table tbl) throws HiveException { + context.getDb().alterTable(tbl, false, null, true); + context.getWork().getInputs().add(new ReadEntity(tbl)); + // We've already locked the table as the input, don't relock it as the output. + DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java similarity index 67% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java index 81fcc4689d..c57aef7690 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java @@ -16,34 +16,42 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table.partition; import java.io.Serializable; import java.util.ArrayList; import java.util.List; import java.util.Map; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** - * DropPartitionDesc. + * DDL task description for ALTER TABLE ... DROP PARTITION ... commands. 
*/ @Explain(displayName = "Drop Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropPartitionDesc extends DDLDesc implements Serializable { +public class AlterTableDropPartitionDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; + static { + DDLTask2.registerOperation(AlterTableDropPartitionDesc.class, AlterTableDropPartitionOperation.class); + } + /** - * PartSpec. + * Partition description. */ - public static class PartSpec implements Serializable { + public static class PartitionDesc implements Serializable { private static final long serialVersionUID = 1L; - private ExprNodeGenericFuncDesc partSpec; + private final ExprNodeGenericFuncDesc partSpec; // TODO: see if we can get rid of this... used in one place to distinguish archived parts - private int prefixLength; + private final int prefixLength; - public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) { + public PartitionDesc(ExprNodeGenericFuncDesc partSpec, int prefixLength) { this.partSpec = partSpec; this.prefixLength = prefixLength; } @@ -58,18 +66,18 @@ public int getPrefixLength() { } private final String tableName; - private final ArrayList partSpecs; + private final ArrayList partSpecs; private final boolean ifPurge; private final ReplicationSpec replicationSpec; - public DropPartitionDesc(String tableName, Map> partSpecs, boolean ifPurge, - ReplicationSpec replicationSpec) { + public AlterTableDropPartitionDesc(String tableName, Map> partSpecs, + boolean ifPurge, ReplicationSpec replicationSpec) { this.tableName = tableName; - this.partSpecs = new ArrayList(partSpecs.size()); + this.partSpecs = new ArrayList(partSpecs.size()); for (Map.Entry> partSpec : partSpecs.entrySet()) { int prefixLength = partSpec.getKey(); for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) { - this.partSpecs.add(new PartSpec(expr, prefixLength)); + this.partSpecs.add(new PartitionDesc(expr, prefixLength)); } } this.ifPurge = ifPurge; @@ -81,12 +89,12 @@ public String getTableName() { return tableName; } - public ArrayList getPartSpecs() { + public ArrayList getPartSpecs() { return partSpecs; } public boolean getIfPurge() { - return ifPurge; + return ifPurge; } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java new file mode 100644 index 0000000000..eae2b51beb --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionOperation.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.PartitionDropOptions; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.InvalidTableException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +import com.google.common.collect.Iterables; + +/** + * Operation process of dropping some partitions of a table. + */ +public class AlterTableDropPartitionOperation extends DDLOperation { + private final AlterTableDropPartitionDesc desc; + + public AlterTableDropPartitionOperation(DDLOperationContext context, AlterTableDropPartitionDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // We need to fetch the table before it is dropped so that it can be passed to post-execution hook + Table tbl = null; + try { + tbl = context.getDb().getTable(desc.getTableName()); + } catch (InvalidTableException e) { + // drop table is idempotent + } + + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + if (replicationSpec.isInReplicationScope()) { + dropPartitionForReplication(tbl, replicationSpec); + } else { + dropPartitions(); + } + + return 0; + } + + private void dropPartitionForReplication(Table tbl, ReplicationSpec replicationSpec) throws HiveException { + /** + * ALTER TABLE DROP PARTITION ... FOR REPLICATION(x) behaves as a DROP PARTITION IF OLDER THAN x + * + * So, we check each partition that matches our AlterTableDropPartitionDesc.getPartSpecs(), and drop it only + * if it's older than the event that spawned this replicated request to drop partition + */ + // TODO: Current implementation of replication will result in DROP_PARTITION under replication + // scope being called per-partition instead of multiple partitions. However, to be robust, we + // must still handle the case of multiple partitions in case this assumption changes in the + // future. However, if this assumption changes, we will not be very performant if we fetch + // each partition one-by-one, and then decide on inspection whether or not this is a candidate + // for dropping. Thus, we need a way to push this filter (replicationSpec.allowEventReplacementInto) + // to the metastore to let it decide whether or not to drop a partition, depending on a Predicate on the + // parameter key values. + + if (tbl == null) { + // If the table is missing, then its partitions would have been dropped as well. Just no-op. + return; + } + + for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()){ + List partitions = new ArrayList<>(); + try { + context.getDb().getPartitionsByExpr(tbl, partSpec.getPartSpec(), context.getConf(), partitions); + for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())) { + context.getDb().dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true); + } + } catch (NoSuchObjectException e){ + // ignore NSOE because that means there's nothing to drop. 
+ } catch (Exception e) { + throw new HiveException(e.getMessage(), e); + } + } + } + + private void dropPartitions() throws HiveException { + // ifExists is currently verified in DDLSemanticAnalyzer + List droppedParts = context.getDb().dropPartitions(desc.getTableName(), desc.getPartSpecs(), + PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(desc.getIfPurge())); + for (Partition partition : droppedParts) { + context.getConsole().printInfo("Dropped the partition " + partition.getName()); + // We have already locked the table, don't lock the partitions. + DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableExchangePartition.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsDesc.java similarity index 53% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableExchangePartition.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsDesc.java index 2a67494b00..176bae838c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableExchangePartition.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsDesc.java @@ -16,52 +16,46 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table.partition; import java.util.Map; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; -public class AlterTableExchangePartition extends DDLDesc { - - // The source table - private Table sourceTable; - - // The destination table - private Table destinationTable; +/** + * DDL task description for ALTER TABLE ... EXCHANGE PARTITION ... WITH TABLE ... commands. 
+ */ +@Explain(displayName = "Exchange Partitions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableExchangePartitionsDesc implements DDLDesc { + static { + DDLTask2.registerOperation(AlterTableExchangePartitionsDesc.class, AlterTableExchangePartitionsOperation.class); + } - // The partition that has to be exchanged - private Map partitionSpecs; + private final Table sourceTable; + private final Table destinationTable; + private final Map partitionSpecs; - public AlterTableExchangePartition(Table sourceTable, Table destinationTable, + public AlterTableExchangePartitionsDesc(Table sourceTable, Table destinationTable, Map partitionSpecs) { - super(); this.sourceTable = sourceTable; this.destinationTable = destinationTable; this.partitionSpecs = partitionSpecs; } - public void setSourceTable(Table sourceTable) { - this.sourceTable = sourceTable; - } - public Table getSourceTable() { - return this.sourceTable; - } - - public void setDestinationTable(Table destinationTable) { - this.destinationTable = destinationTable; + return sourceTable; } public Table getDestinationTable() { - return this.destinationTable; - } - - public void setPartitionSpecs(Map partitionSpecs) { - this.partitionSpecs = partitionSpecs; + return destinationTable; } + @Explain(displayName = "partitions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public Map getPartitionSpecs() { - return this.partitionSpecs; + return partitionSpecs; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsOperation.java new file mode 100644 index 0000000000..65400838d9 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableExchangePartitionsOperation.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of exchanging some partitions between tables. 
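This operation backs ALTER TABLE <destination> EXCHANGE PARTITION (<spec>) WITH TABLE <source> statements: the table named in WITH TABLE acts as the source and the ALTER TABLE target as the destination, matching the delete/insert write entities below. A brief sketch of the client call it delegates to (placeholder names, live metastore assumed):

    Map<String, String> partitionSpec = new LinkedHashMap<>();
    partitionSpec.put("ds", "2019-06-01");

    Hive db = Hive.get(conf);
    // Moves the matching partition data from staging_logs (source) into web_logs (destination).
    List<Partition> moved = db.exchangeTablePartitions(
        partitionSpec, "default", "staging_logs", "default", "web_logs");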
+ */ +public class AlterTableExchangePartitionsOperation extends DDLOperation { + private final AlterTableExchangePartitionsDesc desc; + + public AlterTableExchangePartitionsOperation(DDLOperationContext context, AlterTableExchangePartitionsDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Map partitionSpecs = desc.getPartitionSpecs(); + Table destTable = desc.getDestinationTable(); + Table sourceTable = desc.getSourceTable(); + + List partitions = context.getDb().exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(), + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + for (Partition partition : partitions) { + // Reuse the partition specs from dest partition since they should be the same + context.getWork().getInputs().add(new ReadEntity(new Partition(sourceTable, partition.getSpec(), null))); + + DDLUtils.addIfAbsentByName(new WriteEntity(new Partition(sourceTable, partition.getSpec(), null), + WriteEntity.WriteType.DELETE), context); + + DDLUtils.addIfAbsentByName(new WriteEntity(new Partition(destTable, partition.getSpec(), null), + WriteEntity.WriteType.INSERT), context); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java new file mode 100644 index 0000000000..b534db126f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import java.io.Serializable; +import java.util.LinkedHashMap; +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... PARTITION ... RENAME TO PARTITION ... commands. 
+ */ +@Explain(displayName = "Rename Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableRenamePartitionDesc implements DDLDesc, Serializable, DDLDescWithWriteId { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableRenamePartitionDesc.class, AlterTableRenamePartitionOperation.class); + } + + private final String tableName; + private final Map oldPartSpec; + private final Map newPartSpec; + private final ReplicationSpec replicationSpec; + private final String fqTableName; + + private long writeId; + + public AlterTableRenamePartitionDesc(String tableName, Map oldPartSpec, + Map newPartSpec, ReplicationSpec replicationSpec, Table table) { + this.tableName = tableName; + this.oldPartSpec = new LinkedHashMap(oldPartSpec); + this.newPartSpec = new LinkedHashMap(newPartSpec); + this.replicationSpec = replicationSpec; + this.fqTableName = table != null ? (table.getDbName() + "." + table.getTableName()) : tableName; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "old partitions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map getOldPartSpec() { + return oldPartSpec; + } + + @Explain(displayName = "new partitions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map getNewPartSpec() { + return newPartSpec; + } + + /** + * @return what kind of replication scope this rename is running under. + * This can result in a "RENAME IF NEWER THAN" kind of semantic + */ + @Explain(displayName = "replication", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public ReplicationSpec getReplicationSpec() { + return this.replicationSpec; + } + + @Override + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + public long getWriteId() { + return writeId; + } + + @Override + public String getFullTableName() { + return fqTableName; + } + + @Override + public boolean mayNeedWriteId() { + return true; // The check is done when setting this as the ACID DDLDesc. + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionOperation.java new file mode 100644 index 0000000000..b26d346c36 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionOperation.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import java.util.ArrayList; +import java.util.Map; + +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; + +/** + * Operation process of renaming a partition of a table. + */ +public class AlterTableRenamePartitionOperation extends DDLOperation { + private final AlterTableRenamePartitionDesc desc; + + public AlterTableRenamePartitionOperation(DDLOperationContext context, AlterTableRenamePartitionDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + String tableName = desc.getTableName(); + Map oldPartSpec = desc.getOldPartSpec(); + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + + if (!AlterTableUtils.allowOperationInReplicationScope(context.getDb(), tableName, oldPartSpec, replicationSpec)) { + // no rename, the table is missing either due to drop/rename which follows the current rename. + // or the existing table is newer than our update. + LOG.debug("DDLTask: Rename Partition is skipped as table {} / partition {} is newer than update", tableName, + FileUtils.makePartName(new ArrayList<>(oldPartSpec.keySet()), new ArrayList<>(oldPartSpec.values()))); + return 0; + } + + String[] names = Utilities.getDbTableName(tableName); + if (Utils.isBootstrapDumpInProgress(context.getDb(), names[0])) { + LOG.error("DDLTask: Rename Partition not allowed as bootstrap dump in progress"); + throw new HiveException("Rename Partition: Not allowed as bootstrap dump in progress"); + } + + Table tbl = context.getDb().getTable(tableName); + Partition oldPart = context.getDb().getPartition(tbl, oldPartSpec, false); + if (oldPart == null) { + String partName = FileUtils.makePartName(new ArrayList(oldPartSpec.keySet()), + new ArrayList(oldPartSpec.values())); + throw new HiveException("Rename partition: source partition [" + partName + "] does not exist."); + } + + Partition part = context.getDb().getPartition(tbl, oldPartSpec, false); + part.setValues(desc.getNewPartSpec()); + + long writeId = desc.getWriteId(); + if (replicationSpec != null && replicationSpec.isMigratingToTxnTable()) { + Long tmpWriteId = ReplUtils.getMigrationCurrentTblWriteId(context.getConf()); + if (tmpWriteId == null) { + throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); + } + writeId = tmpWriteId; + } + + context.getDb().renamePartition(tbl, oldPartSpec, part, writeId); + Partition newPart = context.getDb().getPartition(tbl, desc.getNewPartSpec(), false); + context.getWork().getInputs().add(new ReadEntity(oldPart)); + + // We've already obtained a lock on the table, don't lock the partition too + DDLUtils.addIfAbsentByName(new WriteEntity(newPart, WriteEntity.WriteType.DDL_NO_LOCK), context); + + return 0; + } +} diff --git 
ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsDesc.java similarity index 52% rename from ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java rename to ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsDesc.java index c9ed41a6e5..187a61fd7a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsDesc.java @@ -16,104 +16,52 @@ * limitations under the License. */ -package org.apache.hadoop.hive.ql.plan; +package org.apache.hadoop.hive.ql.ddl.table.partition; import java.io.Serializable; import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; - /** - * ShowPartitionsDesc. - * + * DDL task description for SHOW PARTITIONS commands. */ @Explain(displayName = "Show Partitions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowPartitionsDesc extends DDLDesc implements Serializable { +public class ShowPartitionsDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - String tabName; - String resFile; - // Filter the partitions to show based on on supplied spec - Map partSpec; - /** - * table name for the result of show tables. - */ - private static final String table = "showpartitions"; - /** - * thrift ddl for the result of show tables. - */ - private static final String schema = "partition#string"; - - public String getTable() { - return table; + static { + DDLTask2.registerOperation(ShowPartitionsDesc.class, ShowPartitionsOperation.class); } - public String getSchema() { - return schema; - } + public static final String SCHEMA = "partition#string"; - public ShowPartitionsDesc() { - } + private final String tabName; + private final String resFile; + private final Map partSpec; - /** - * @param tabName - * Name of the table whose partitions need to be listed. - * @param resFile - * File to store the results in - */ - public ShowPartitionsDesc(String tabName, Path resFile, - Map partSpec) { + public ShowPartitionsDesc(String tabName, Path resFile, Map partSpec) { this.tabName = tabName; this.resFile = resFile.toString(); this.partSpec = partSpec; } - /** - * @return the name of the table. - */ @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTabName() { return tabName; } - /** - * @param tabName - * the table whose partitions have to be listed - */ - public void setTabName(String tabName) { - this.tabName = tabName; - } - - /** - * @return the name of the table. - */ @Explain(displayName = "partSpec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public Map getPartSpec() { return partSpec; } - /** - * @param partSpec the partSpec to set. 
- */ - public void setPartSpec(Map partSpec) { - this.partSpec = partSpec; - } - - /** - * @return the results file - */ @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) public String getResFile() { return resFile; } - - /** - * @param resFile - * the results file to be used to return the results - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsOperation.java new file mode 100644 index 0000000000..5df9474805 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/ShowPartitionsOperation.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.partition; + +import java.io.DataOutputStream; +import java.util.List; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of showing the partitions of a table. 
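The operation is essentially a partition-name listing plus formatting: each result line is one partition name in key=value/key=value form, which is what the single-column SCHEMA constant "partition#string" above describes. A sketch of the underlying listing call made by execute() below (placeholder names, live metastore assumed):

    Hive db = Hive.get(conf);
    List<String> partitionNames = db.getPartitionNames("default", "web_logs", (short) -1);  // -1 = no limit
    // e.g. ["ds=2019-06-01/hr=00", "ds=2019-06-01/hr=01", ...]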
+ */ +public class ShowPartitionsOperation extends DDLOperation { + private final ShowPartitionsDesc desc; + + public ShowPartitionsOperation(DDLOperationContext context, ShowPartitionsDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Table tbl = context.getDb().getTable(desc.getTabName()); + if (!tbl.isPartitioned()) { + throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, desc.getTabName()); + } + + List parts = null; + if (desc.getPartSpec() != null) { + parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), desc.getPartSpec(), (short) -1); + } else { + parts = context.getDb().getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1); + } + + // write the results in the file + try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { + context.getFormatter().showTablePartitions(outStream, parts); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show partitions for table " + desc.getTabName()); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/package-info.java new file mode 100644 index 0000000000..180e7053ad --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Partition related DDL operation descriptions and operations. 
*/ +package org.apache.hadoop.hive.ql.ddl.table.partition; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 3d4ba0110a..1a3d4897db 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -35,13 +35,11 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -50,7 +48,6 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.Msck; import org.apache.hadoop.hive.metastore.MsckInfo; -import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CompactionResponse; @@ -86,12 +83,10 @@ import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.PartitionIterable; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; -import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter; import org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataTable; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; @@ -99,15 +94,11 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; -import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; @@ -117,11 +108,9 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc; import org.apache.hadoop.hive.ql.plan.RCFileMergeDesc; -import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.ReplRemoveFirstIncLoadPendFlagDesc; import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import 
org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; @@ -132,13 +121,6 @@ import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.tools.HadoopArchives; @@ -165,8 +147,6 @@ private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX; private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX; - private MetaDataFormatter formatter; - @Override public boolean requireLock() { return this.work != null && this.work.getNeedLock(); @@ -183,7 +163,6 @@ public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext // Pick the formatter to use to display the results. Either the // normal human readable output or a json object. - formatter = MetaDataFormatUtils.getFormatter(conf); INTERMEDIATE_ARCHIVED_DIR_SUFFIX = HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED); INTERMEDIATE_ORIGINAL_DIR_SUFFIX = @@ -203,12 +182,6 @@ public int execute(DriverContext driverContext) { try { db = Hive.get(conf); - DropPartitionDesc dropPartition = work.getDropPartitionDesc(); - if (dropPartition != null) { - dropPartitions(db, dropPartition); - return 0; - } - AlterTableDesc alterTbl = work.getAlterTblDesc(); if (alterTbl != null) { if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) { @@ -226,16 +199,6 @@ public int execute(DriverContext driverContext) { } } - AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc(); - if (addPartitionDesc != null) { - return addPartitions(db, addPartitionDesc); - } - - RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc(); - if (renamePartitionDesc != null) { - return renamePartition(db, renamePartitionDesc); - } - AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc(); if (simpleDesc != null) { if (simpleDesc.getType() == AlterTableTypes.TOUCH) { @@ -259,11 +222,6 @@ public int execute(DriverContext driverContext) { return showColumns(db, showCols); } - ShowPartitionsDesc showParts = work.getShowPartsDesc(); - if (showParts != null) { - return showPartitions(db, showParts); - } - ShowConfDesc showConf = work.getShowConfDesc(); if (showConf != null) { return showConf(db, showConf); @@ -274,17 +232,6 @@ public int execute(DriverContext driverContext) { return mergeFiles(db, mergeFilesDesc, driverContext); } - AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc(); - if(alterPartDesc != null) { - return alterTableAlterPart(db, alterPartDesc); - } - - AlterTableExchangePartition alterTableExchangePartition = - work.getAlterTableExchangePartition(); - if (alterTableExchangePartition != null) { - return exchangeTablePartition(db, alterTableExchangePartition); - } - CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc(); if (cacheMetadataDesc != null) { return cacheMetadata(db, cacheMetadataDesc); @@ -461,170 +408,6 
@@ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, return ret; } - /** - * Add a partitions to a table. - * - * @param db - * Database to add the partition to. - * @param addPartitionDesc - * Add these partitions. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - */ - private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException { - List parts = db.createPartitions(addPartitionDesc); - for (Partition part : parts) { - addIfAbsentByName(new WriteEntity(part, WriteEntity.WriteType.INSERT)); - } - return 0; - } - - /** - * Rename a partition in a table - * - * @param db - * Database to rename the partition. - * @param renamePartitionDesc - * rename old Partition to new one. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - */ - private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) throws HiveException { - String tableName = renamePartitionDesc.getTableName(); - LinkedHashMap oldPartSpec = renamePartitionDesc.getOldPartSpec(); - - if (!allowOperationInReplicationScope(db, tableName, oldPartSpec, renamePartitionDesc.getReplicationSpec())) { - // no rename, the table is missing either due to drop/rename which follows the current rename. - // or the existing table is newer than our update. - if (LOG.isDebugEnabled()) { - LOG.debug("DDLTask: Rename Partition is skipped as table {} / partition {} is newer than update", - tableName, - FileUtils.makePartName(new ArrayList<>(oldPartSpec.keySet()), new ArrayList<>(oldPartSpec.values()))); - } - return 0; - } - - String names[] = Utilities.getDbTableName(tableName); - if (Utils.isBootstrapDumpInProgress(db, names[0])) { - LOG.error("DDLTask: Rename Partition not allowed as bootstrap dump in progress"); - throw new HiveException("Rename Partition: Not allowed as bootstrap dump in progress"); - } - - Table tbl = db.getTable(tableName); - Partition oldPart = db.getPartition(tbl, oldPartSpec, false); - if (oldPart == null) { - String partName = FileUtils.makePartName(new ArrayList(oldPartSpec.keySet()), - new ArrayList(oldPartSpec.values())); - throw new HiveException("Rename partition: source partition [" + partName - + "] does not exist."); - } - Partition part = db.getPartition(tbl, oldPartSpec, false); - part.setValues(renamePartitionDesc.getNewPartSpec()); - long writeId = renamePartitionDesc.getWriteId(); - if (renamePartitionDesc.getReplicationSpec() != null - && renamePartitionDesc.getReplicationSpec().isMigratingToTxnTable()) { - Long tmpWriteId = ReplUtils.getMigrationCurrentTblWriteId(conf); - if (tmpWriteId == null) { - throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); - } - writeId = tmpWriteId; - } - db.renamePartition(tbl, oldPartSpec, part, writeId); - Partition newPart = db.getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false); - work.getInputs().add(new ReadEntity(oldPart)); - // We've already obtained a lock on the table, don't lock the partition too - addIfAbsentByName(new WriteEntity(newPart, WriteEntity.WriteType.DDL_NO_LOCK)); - return 0; - } - - /** - * Alter partition column type in a table - * - * @param db - * Database to rename the partition. - * @param alterPartitionDesc - * change partition column type. - * @return Returns 0 when execution succeeds and above 0 if it fails. 
- * @throws HiveException - */ - private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionDesc) - throws HiveException { - - Table tbl = db.getTable(alterPartitionDesc.getTableName(), true); - - // This is checked by DDLSemanticAnalyzer - assert(tbl.isPartitioned()); - - List newPartitionKeys = new ArrayList(); - - //Check if the existing partition values can be type casted to the new column type - // with a non null value before trying to alter the partition column type. - try { - Set partitions = db.getAllPartitionsOf(tbl); - int colIndex = -1; - for(FieldSchema col : tbl.getTTable().getPartitionKeys()) { - colIndex++; - if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) { - break; - } - } - - if (colIndex == -1 || colIndex == tbl.getTTable().getPartitionKeys().size()) { - throw new HiveException("Cannot find partition column " + - alterPartitionDesc.getPartKeySpec().getName()); - } - - TypeInfo expectedType = - TypeInfoUtils.getTypeInfoFromTypeString(alterPartitionDesc.getPartKeySpec().getType()); - ObjectInspector outputOI = - TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType); - Converter converter = ObjectInspectorConverters.getConverter( - PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); - - // For all the existing partitions, check if the value can be type casted to a non-null object - for(Partition part : partitions) { - if (part.getName().equals(conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) { - continue; - } - try { - String value = part.getValues().get(colIndex); - Object convertedValue = - converter.convert(value); - if (convertedValue == null) { - throw new HiveException(" Converting from " + TypeInfoFactory.stringTypeInfo + " to " + - expectedType + " for value : " + value + " resulted in NULL object"); - } - } catch (Exception e) { - throw new HiveException("Exception while converting " + - TypeInfoFactory.stringTypeInfo + " to " + - expectedType + " for value : " + part.getValues().get(colIndex)); - } - } - } catch(Exception e) { - throw new HiveException( - "Exception while checking type conversion of existing partition values to " + - alterPartitionDesc.getPartKeySpec() + " : " + e.getMessage()); - } - - for(FieldSchema col : tbl.getTTable().getPartitionKeys()) { - if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) { - newPartitionKeys.add(alterPartitionDesc.getPartKeySpec()); - } else { - newPartitionKeys.add(col); - } - } - - tbl.getTTable().setPartitionKeys(newPartitionKeys); - - db.alterTable(tbl, false, null, true); - - work.getInputs().add(new ReadEntity(tbl)); - // We've already locked the table as the input, don't relock it as the output. - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - - return 0; - } - /** * Rewrite the partition's metadata and force the pre/post execute hooks to * be fired. @@ -1344,48 +1127,6 @@ private int msck(Hive db, MsckDesc msckDesc) { } } - /** - * Write a list of partitions to a file. - * - * @param db - * The database in question. - * @param showParts - * These are the partitions we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. 
- */ - private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveException { - // get the partitions for the table and populate the output - String tabName = showParts.getTabName(); - Table tbl = null; - List parts = null; - - tbl = db.getTable(tabName); - - if (!tbl.isPartitioned()) { - throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tabName); - } - if (showParts.getPartSpec() != null) { - parts = db.getPartitionNames(tbl.getDbName(), - tbl.getTableName(), showParts.getPartSpec(), (short) -1); - } else { - parts = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1); - } - - // write the results in the file - DataOutputStream outStream = getOutputStream(showParts.getResFile()); - try { - formatter.showTablePartitions(outStream, parts); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show partitions for table " + tabName); - } finally { - IOUtils.closeStream(outStream); - } - - return 0; - } - /** * Write a list of the columns in the table to a file. * @@ -2188,80 +1929,6 @@ private int updateColumns(Table tbl, Partition part) return 0; } - /** - * Drop a given partitions. - * - * @param db - * The database in question. - * @param dropPartition - * This is the partition we're dropping. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private void dropPartitions(Hive db, DropPartitionDesc dropPartition) throws HiveException { - // We need to fetch the table before it is dropped so that it can be passed to - // post-execution hook - Table tbl = null; - try { - tbl = db.getTable(dropPartition.getTableName()); - } catch (InvalidTableException e) { - // drop table is idempotent - } - - ReplicationSpec replicationSpec = dropPartition.getReplicationSpec(); - if (replicationSpec.isInReplicationScope()){ - /** - * ALTER TABLE DROP PARTITION ... FOR REPLICATION(x) behaves as a DROP PARTITION IF OLDER THAN x - * - * So, we check each partition that matches our DropTableDesc.getPartSpecs(), and drop it only - * if it's older than the event that spawned this replicated request to drop partition - */ - // TODO: Current implementation of replication will result in DROP_PARTITION under replication - // scope being called per-partition instead of multiple partitions. However, to be robust, we - // must still handle the case of multiple partitions in case this assumption changes in the - // future. However, if this assumption changes, we will not be very performant if we fetch - // each partition one-by-one, and then decide on inspection whether or not this is a candidate - // for dropping. Thus, we need a way to push this filter (replicationSpec.allowEventReplacementInto) - // to the metastore to allow it to do drop a partition or not, depending on a Predicate on the - // parameter key values. - - if (tbl == null) { - // If table is missing, then partitions are also would've been dropped. Just no-op. - return; - } - - for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()){ - List partitions = new ArrayList<>(); - try { - db.getPartitionsByExpr(tbl, partSpec.getPartSpec(), conf, partitions); - for (Partition p : Iterables.filter(partitions, - replicationSpec.allowEventReplacementInto())){ - db.dropPartition(tbl.getDbName(),tbl.getTableName(),p.getValues(),true); - } - } catch (NoSuchObjectException e){ - // ignore NSOE because that means there's nothing to drop. 
- } catch (Exception e) { - throw new HiveException(e.getMessage(), e); - } - } - return; - } - - // ifExists is currently verified in DDLSemanticAnalyzer - List droppedParts - = db.dropPartitions(dropPartition.getTableName(), - dropPartition.getPartSpecs(), - PartitionDropOptions.instance() - .deleteData(true) - .ifExists(true) - .purgeData(dropPartition.getIfPurge())); - for (Partition partition : droppedParts) { - console.printInfo("Dropped the partition " + partition.getName()); - // We have already locked the table, don't lock the partitions. - addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK)); - } - } - /** * Update last_modified_by and last_modified_time parameters in parameter map. * @@ -2294,30 +1961,6 @@ public static void validateSerDe(String serdeName, HiveConf conf) throws HiveExc } } - private int exchangeTablePartition(Hive db, - AlterTableExchangePartition exchangePartition) throws HiveException { - Map partitionSpecs = exchangePartition.getPartitionSpecs(); - Table destTable = exchangePartition.getDestinationTable(); - Table sourceTable = exchangePartition.getSourceTable(); - List partitions = - db.exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(),destTable.getDbName(), - destTable.getTableName()); - - for(Partition partition : partitions) { - // Reuse the partition specs from dest partition since they should be the same - work.getInputs().add(new ReadEntity(new Partition(sourceTable, partition.getSpec(), null))); - - addIfAbsentByName(new WriteEntity(new Partition(sourceTable, partition.getSpec(), null), - WriteEntity.WriteType.DELETE)); - - addIfAbsentByName(new WriteEntity(new Partition(destTable, partition.getSpec(), null), - WriteEntity.WriteType.INSERT)); - } - - return 0; - } - @Override public StageType getType() { return StageType.DDL; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java index b59ab6c319..251193e161 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/PartitionEvent.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hive.ql.exec.repl.bootstrap.events; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; public interface PartitionEvent extends TableEvent { - AddPartitionDesc lastPartitionReplicated(); + AlterTableAddPartitionDesc lastPartitionReplicated(); TableEvent asTableEvent(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java index 3dcc1d713c..992a4caddd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java @@ -18,9 +18,9 @@ Licensed to the Apache Software Foundation (ASF) under one package org.apache.hadoop.hive.ql.exec.repl.bootstrap.events; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import java.util.List; 
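/*
 * Illustrative sketch, not part of the patch: with this change, replication and import code
 * describes a partition to add with AlterTableAddPartitionDesc (new package
 * org.apache.hadoop.hive.ql.ddl.table.partition) and hands it to the DDL framework through
 * DDLWork2. The database, table, partition spec and locations below are placeholders, and the
 * surrounding java.util imports plus a HiveConf named conf are assumed to be in scope; the
 * constructor, getPartition(0)/setLocation and TaskFactory/DDLWork2 wiring mirror calls that
 * appear elsewhere in this patch.
 */
Map<String, String> partSpec = Collections.singletonMap("ds", "2019-01-01");           // placeholder partition spec
AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(
    "default", "sales", partSpec, "/warehouse/sales/ds=2019-01-01", new HashMap<String, String>());
AlterTableAddPartitionDesc.PartitionDesc partDesc = addPartitionDesc.getPartition(0);
partDesc.setLocation("/replica/warehouse/sales/ds=2019-01-01");                        // e.g. rewritten to the replica warehouse path
Task<?> addPartTask =
    TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), addPartitionDesc), conf);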
@@ -28,7 +28,7 @@ Licensed to the Apache Software Foundation (ASF) under one public interface TableEvent extends BootstrapEvent { ImportTableDesc tableDesc(String dbName) throws SemanticException; - List partitionDescriptions(ImportTableDesc tblDesc) + List partitionDescriptions(ImportTableDesc tblDesc) throws SemanticException; List partitions(ImportTableDesc tblDesc) diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java index ee804e862a..0c27a9965f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java @@ -19,12 +19,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.PartitionEvent; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.TableEvent; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.ReplicationState; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import java.util.List; @@ -46,7 +46,7 @@ public EventType eventType() { } @Override - public AddPartitionDesc lastPartitionReplicated() { + public AlterTableAddPartitionDesc lastPartitionReplicated() { assert replicationState != null && replicationState.partitionState != null; return replicationState.partitionState.lastReplicatedPartition; } @@ -62,7 +62,7 @@ public ImportTableDesc tableDesc(String dbName) throws SemanticException { } @Override - public List partitionDescriptions(ImportTableDesc tblDesc) + public List partitionDescriptions(ImportTableDesc tblDesc) throws SemanticException { return tableEvent.partitionDescriptions(tblDesc); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java index 27009f0385..64f9af3aba 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.TableEvent; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -36,7 +37,6 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration; @@ -140,13 +140,13 @@ public ImportTableDesc tableDesc(String dbName) throws SemanticException { } @Override - public List 
partitionDescriptions(ImportTableDesc tblDesc) + public List partitionDescriptions(ImportTableDesc tblDesc) throws SemanticException { - List descs = new ArrayList<>(); + List descs = new ArrayList<>(); //TODO: if partitions are loaded lazily via the iterator then we will have to avoid conversion of everything here as it defeats the purpose. for (Partition partition : metadata.getPartitions()) { // TODO: this should ideally not create AddPartitionDesc per partition - AddPartitionDesc partsDesc = partitionDesc(fromPath, tblDesc, partition); + AlterTableAddPartitionDesc partsDesc = partitionDesc(fromPath, tblDesc, partition); descs.add(partsDesc); } return descs; @@ -167,14 +167,14 @@ public ImportTableDesc tableDesc(String dbName) throws SemanticException { return partitions; } - private AddPartitionDesc partitionDesc(Path fromPath, + private AlterTableAddPartitionDesc partitionDesc(Path fromPath, ImportTableDesc tblDesc, Partition partition) throws SemanticException { try { - AddPartitionDesc partsDesc = - new AddPartitionDesc(tblDesc.getDatabaseName(), tblDesc.getTableName(), + AlterTableAddPartitionDesc partsDesc = + new AlterTableAddPartitionDesc(tblDesc.getDatabaseName(), tblDesc.getTableName(), EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()), partition.getSd().getLocation(), partition.getParameters()); - AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0); + AlterTableAddPartitionDesc.PartitionDesc partDesc = partsDesc.getPartition(0); partDesc.setInputFormat(partition.getSd().getInputFormat()); partDesc.setOutputFormat(partition.getSd().getOutputFormat()); partDesc.setNumBuckets(partition.getSd().getNumBuckets()); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java index 5a4dc4c5c0..e15dec340d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/ReplicationState.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; - import java.io.Serializable; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; + public class ReplicationState implements Serializable { public static class PartitionState { final String tableName; - public final AddPartitionDesc lastReplicatedPartition; + public final AlterTableAddPartitionDesc lastReplicatedPartition; - public PartitionState(String tableName, AddPartitionDesc lastReplicatedPartition) { + public PartitionState(String tableName, AlterTableAddPartitionDesc lastReplicatedPartition) { this.tableName = tableName; this.lastReplicatedPartition = lastReplicatedPartition; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index 0add38b213..6e19cb4b69 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -23,6 +23,9 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import 
org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -42,9 +45,6 @@ import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; @@ -76,7 +76,7 @@ private final TableContext tableContext; private final TableEvent event; private final TaskTracker tracker; - private final AddPartitionDesc lastReplicatedPartition; + private final AlterTableAddPartitionDesc lastReplicatedPartition; private final ImportTableDesc tableDesc; private Table table; @@ -89,7 +89,7 @@ public LoadPartitions(Context context, ReplLogger replLogger, TaskTracker tableT public LoadPartitions(Context context, ReplLogger replLogger, TableContext tableContext, TaskTracker limiter, TableEvent event, String dbNameToLoadIn, - AddPartitionDesc lastReplicatedPartition) throws HiveException { + AlterTableAddPartitionDesc lastReplicatedPartition) throws HiveException { this.tracker = new TaskTracker(limiter); this.event = event; this.context = context; @@ -126,7 +126,7 @@ public TaskTracker tasks() throws Exception { } else { // existing if (table.isPartitioned()) { - List partitionDescs = event.partitionDescriptions(tableDesc); + List partitionDescs = event.partitionDescriptions(tableDesc); if (!event.replicationSpec().isMetadataOnly() && !partitionDescs.isEmpty()) { updateReplicationState(initialReplicationState()); if (!forExistingTable(lastReplicatedPartition).hasReplicationState()) { @@ -155,9 +155,9 @@ private ReplicationState initialReplicationState() throws SemanticException { } private TaskTracker forNewTable() throws Exception { - Iterator iterator = event.partitionDescriptions(tableDesc).iterator(); + Iterator iterator = event.partitionDescriptions(tableDesc).iterator(); while (iterator.hasNext() && tracker.canAddMoreTasks()) { - AddPartitionDesc currentPartitionDesc = iterator.next(); + AlterTableAddPartitionDesc currentPartitionDesc = iterator.next(); /* the currentPartitionDesc cannot be inlined as we need the hasNext() to be evaluated post the current retrieved lastReplicatedPartition @@ -167,7 +167,7 @@ the currentPartitionDesc cannot be inlined as we need the hasNext() to be evalua return tracker; } - private void addPartition(boolean hasMorePartitions, AddPartitionDesc addPartitionDesc, Task ptnRootTask) + private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc addPartitionDesc, Task ptnRootTask) throws Exception { tracker.addTask(tasksForAddPartition(table, addPartitionDesc, ptnRootTask)); if (hasMorePartitions && !tracker.canAddMoreTasks()) { @@ -180,9 +180,9 @@ private void addPartition(boolean hasMorePartitions, AddPartitionDesc addPartiti /** * returns the root task for adding a partition */ - private Task tasksForAddPartition(Table table, AddPartitionDesc addPartitionDesc, Task ptnRootTask) + private Task tasksForAddPartition(Table table, AlterTableAddPartitionDesc addPartitionDesc, Task 
ptnRootTask) throws MetaException, HiveException { - AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0); + AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartition(0); Path sourceWarehousePartitionLocation = new Path(partSpec.getLocation()); Path replicaWarehousePartitionLocation = locationOnReplicaWarehouse(table, partSpec); partSpec.setLocation(replicaWarehousePartitionLocation.toString()); @@ -191,7 +191,7 @@ private void addPartition(boolean hasMorePartitions, AddPartitionDesc addPartiti + partSpec.getLocation()); Task addPartTask = TaskFactory.get( - new DDLWork(new HashSet<>(), new HashSet<>(), addPartitionDesc), + new DDLWork2(new HashSet<>(), new HashSet<>(), addPartitionDesc), context.hiveConf ); @@ -273,7 +273,7 @@ private void addPartition(boolean hasMorePartitions, AddPartitionDesc addPartiti /** * This will create the move of partition data from temp path to actual path */ - private Task movePartitionTask(Table table, AddPartitionDesc.OnePartitionDesc partSpec, Path tmpPath, + private Task movePartitionTask(Table table, AlterTableAddPartitionDesc.PartitionDesc partSpec, Path tmpPath, LoadFileType loadFileType) { MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false); if (AcidUtils.isTransactionalTable(table)) { @@ -318,7 +318,7 @@ private void addPartition(boolean hasMorePartitions, AddPartitionDesc addPartiti * path will always be a child on target. */ - private Path locationOnReplicaWarehouse(Table table, AddPartitionDesc.OnePartitionDesc partSpec) + private Path locationOnReplicaWarehouse(Table table, AlterTableAddPartitionDesc.PartitionDesc partSpec) throws MetaException, HiveException { String child = Warehouse.makePartPath(partSpec.getPartSpec()); if (tableDesc.isExternal()) { @@ -345,20 +345,20 @@ private Path locationOnReplicaWarehouse(Table table, AddPartitionDesc.OnePartiti } private Task dropPartitionTask(Table table, Map partSpec) throws SemanticException { - Task dropPtnTask = null; + Task dropPtnTask = null; Map> partSpecsExpr = ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec)); if (partSpecsExpr.size() > 0) { - DropPartitionDesc dropPtnDesc = new DropPartitionDesc(table.getFullyQualifiedName(), partSpecsExpr, true, - event.replicationSpec()); + AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(table.getFullyQualifiedName(), + partSpecsExpr, true, event.replicationSpec()); dropPtnTask = TaskFactory.get( - new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf + new DDLWork2(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf ); } return dropPtnTask; } - private TaskTracker forExistingTable(AddPartitionDesc lastPartitionReplicated) throws Exception { + private TaskTracker forExistingTable(AlterTableAddPartitionDesc lastPartitionReplicated) throws Exception { boolean encounteredTheLastReplicatedPartition = (lastPartitionReplicated == null); Map lastReplicatedPartSpec = null; if (!encounteredTheLastReplicatedPartition) { @@ -367,15 +367,15 @@ private TaskTracker forExistingTable(AddPartitionDesc lastPartitionReplicated) t StringUtils.mapToString(lastReplicatedPartSpec)); } - Iterator partitionIterator = event.partitionDescriptions(tableDesc).iterator(); + Iterator partitionIterator = event.partitionDescriptions(tableDesc).iterator(); while (!encounteredTheLastReplicatedPartition && partitionIterator.hasNext()) { - AddPartitionDesc addPartitionDesc = partitionIterator.next(); + AlterTableAddPartitionDesc 
addPartitionDesc = partitionIterator.next(); Map currentSpec = addPartitionDesc.getPartition(0).getPartSpec(); encounteredTheLastReplicatedPartition = lastReplicatedPartSpec.equals(currentSpec); } while (partitionIterator.hasNext() && tracker.canAddMoreTasks()) { - AddPartitionDesc addPartitionDesc = partitionIterator.next(); + AlterTableAddPartitionDesc addPartitionDesc = partitionIterator.next(); Map partSpec = addPartitionDesc.getPartition(0).getPartSpec(); Task ptnRootTask = null; ReplLoadOpType loadPtnType = getLoadPartitionType(partSpec); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 111cd1dc3f..c9d8843b78 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -112,6 +112,8 @@ import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.FunctionTask; @@ -127,8 +129,6 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; @@ -2990,7 +2990,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws } } - public List createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException { + public List createPartitions(AlterTableAddPartitionDesc addPartitionDesc) throws HiveException { // TODO: catalog name everywhere in this method Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); int size = addPartitionDesc.getPartitionCount(); @@ -3084,7 +3084,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws } public static org.apache.hadoop.hive.metastore.api.Partition convertAddSpecToMetaPartition( - Table tbl, AddPartitionDesc.OnePartitionDesc addSpec, final HiveConf conf) throws HiveException { + Table tbl, AlterTableAddPartitionDesc.PartitionDesc addSpec, final HiveConf conf) throws HiveException { Path location = addSpec.getLocation() != null ? 
new Path(tbl.getPath(), addSpec.getLocation()) : null; if (location != null) { @@ -3434,7 +3434,7 @@ public boolean dropPartition(String dbName, String tableName, List partV public List dropPartitions(Table table, ListpartDirNames, boolean deleteData, boolean ifExists) throws HiveException { // partitions to be dropped in this batch - List partSpecs = new ArrayList<>(partDirNames.size()); + List partSpecs = new ArrayList<>(partDirNames.size()); // parts of the partition String[] parts = null; @@ -3484,7 +3484,7 @@ public boolean dropPartition(String dbName, String tableName, List partV } // Add the expression to partition specification - partSpecs.add(new DropPartitionDesc.PartSpec(expr, partSpecKey)); + partSpecs.add(new AlterTableDropPartitionDesc.PartitionDesc(expr, partSpecKey)); // Increment dropKey to get a new key for hash map ++partSpecKey; @@ -3494,14 +3494,14 @@ public boolean dropPartition(String dbName, String tableName, List partV return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); } - public List dropPartitions(String tblName, List partSpecs, + public List dropPartitions(String tblName, List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { String[] names = Utilities.getDbTableName(tblName); return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); } public List dropPartitions(String dbName, String tblName, - List partSpecs, boolean deleteData, + List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { return dropPartitions(dbName, tblName, partSpecs, PartitionDropOptions.instance() @@ -3509,19 +3509,20 @@ public boolean dropPartition(String dbName, String tableName, List partV .ifExists(ifExists)); } - public List dropPartitions(String tblName, List partSpecs, + public List dropPartitions(String tblName, List partSpecs, PartitionDropOptions dropOptions) throws HiveException { String[] names = Utilities.getDbTableName(tblName); return dropPartitions(names[0], names[1], partSpecs, dropOptions); } public List dropPartitions(String dbName, String tblName, - List partSpecs, PartitionDropOptions dropOptions) throws HiveException { + List partSpecs, PartitionDropOptions dropOptions) + throws HiveException { try { Table tbl = getTable(dbName, tblName); List> partExprs = new ArrayList<>(partSpecs.size()); - for (DropPartitionDesc.PartSpec partSpec : partSpecs) { + for (AlterTableDropPartitionDesc.PartitionDesc partSpec : partSpecs) { partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(partSpec.getPrefixLength(), SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec()))); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 3bf2a43b01..99d7f21228 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -101,6 +101,13 @@ import org.apache.hadoop.hive.ql.ddl.table.lock.ShowLocksDesc; import org.apache.hadoop.hive.ql.ddl.table.lock.UnlockTableDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAlterPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableExchangePartitionsDesc; +import 
org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableRenamePartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc.PartitionDesc; import org.apache.hadoop.hive.ql.ddl.view.AlterMaterializedViewRewriteDesc; import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolAddTriggerDesc; import org.apache.hadoop.hive.ql.ddl.workloadmanagement.AlterPoolDropTriggerDesc; @@ -145,12 +152,8 @@ import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils; import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactory; import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; -import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.BasicStatsWork; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; @@ -158,7 +161,6 @@ import org.apache.hadoop.hive.ql.plan.DDLDesc; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; @@ -169,10 +171,8 @@ import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.session.SessionState; @@ -868,10 +868,9 @@ private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws Se // If any destination partition is present then throw a Semantic Exception. 
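/*
 * Illustrative sketch, not part of the patch: the ALTER TABLE ... DROP PARTITION analysis further
 * down in this file now builds an AlterTableDropPartitionDesc, and the Hive metadata client
 * (see Hive.java in this patch) accepts its nested PartitionDesc, which pairs a partition filter
 * expression with the prefix length of the spec. The table name, the variables db and
 * partitionExpr, and the option values below are placeholders; the constructors and the
 * PartitionDropOptions chain mirror calls visible in this patch.
 */
List<AlterTableDropPartitionDesc.PartitionDesc> partSpecs = new ArrayList<>();
partSpecs.add(new AlterTableDropPartitionDesc.PartitionDesc(partitionExpr, 0));   // partitionExpr: an ExprNodeGenericFuncDesc built by the analyzer (placeholder)
db.dropPartitions("default", "sales", partSpecs,
    PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(false));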
throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString())); } - AlterTableExchangePartition alterTableExchangePartition = - new AlterTableExchangePartition(sourceTable, destTable, partSpecs); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTableExchangePartition))); + AlterTableExchangePartitionsDesc alterTableExchangePartition = + new AlterTableExchangePartitionsDesc(sourceTable, destTable, partSpecs); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTableExchangePartition))); inputs.add(new ReadEntity(sourceTable)); outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED)); @@ -2617,9 +2616,8 @@ private void analyzeShowPartitions(ASTNode ast) throws SemanticException { showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); inputs.add(new ReadEntity(getTable(tableName))); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showPartsDesc))); - setFetchTask(createFetchTask(showPartsDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showPartsDesc))); + setFetchTask(createFetchTask(ShowPartitionsDesc.SCHEMA)); } private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException { @@ -3322,13 +3320,12 @@ private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, partSpecs.add(oldPartSpec); partSpecs.add(newPartSpec); addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE); - RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc( - tblName, oldPartSpec, newPartSpec, null, tab); + AlterTableRenamePartitionDesc renamePartitionDesc = new AlterTableRenamePartitionDesc(tblName, oldPartSpec, + newPartSpec, null, tab); if (AcidUtils.isTransactionalTable(tab)) { setAcidDdlDesc(renamePartitionDesc); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - renamePartitionDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), renamePartitionDesc))); } private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, @@ -3420,9 +3417,9 @@ private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists); - DropPartitionDesc dropTblDesc = - new DropPartitionDesc(getDotName(qualified), partSpecs, mustPurge, replicationSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc))); + AlterTableDropPartitionDesc dropTblDesc = + new AlterTableDropPartitionDesc(getDotName(qualified), partSpecs, mustPurge, replicationSpec); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), dropTblDesc))); } private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) @@ -3467,14 +3464,13 @@ private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName())); } - AlterTableAlterPartDesc alterTblAlterPartDesc = - new AlterTableAlterPartDesc(getDotName(qualified), newCol); + AlterTableAlterPartitionDesc alterTblAlterPartDesc = + new AlterTableAlterPartitionDesc(getDotName(qualified), newCol); if (AcidUtils.isTransactionalTable(tab)) { setAcidDdlDesc(alterTblAlterPartDesc); } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblAlterPartDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblAlterPartDesc))); } /** @@ -3513,8 +3509,8 @@ private void 
analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole String currentLocation = null; Map currentPart = null; // Parser has done some verification, so the order of tokens doesn't need to be verified here. - AddPartitionDesc addPartitionDesc = - new AddPartitionDesc(tab.getDbName(), tab.getTableName(), ifNotExists); + AlterTableAddPartitionDesc addPartitionDesc = + new AlterTableAddPartitionDesc(tab.getDbName(), tab.getTableName(), ifNotExists); for (int num = start; num < numCh; num++) { ASTNode child = (ASTNode) ast.getChild(num); switch (child.getToken().getType()) { @@ -3546,7 +3542,7 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { for (int index = 0; index < addPartitionDesc.getPartitionCount(); index++) { - OnePartitionDesc desc = addPartitionDesc.getPartition(index); + PartitionDesc desc = addPartitionDesc.getPartition(index); if (desc.getLocation() == null) { if (desc.getPartParams() == null) { desc.setPartParams(new HashMap()); @@ -3562,8 +3558,8 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole return; } - Task ddlTask = - TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc)); + Task ddlTask = + TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), addPartitionDesc)); rootTasks.add(ddlTask); handleTransactionalTable(tab, addPartitionDesc, ddlTask); @@ -3577,7 +3573,7 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole cmd.append(" WHERE "); boolean firstOr = true; for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) { - AddPartitionDesc.OnePartitionDesc partitionDesc = addPartitionDesc.getPartition(i); + AlterTableAddPartitionDesc.PartitionDesc partitionDesc = addPartitionDesc.getPartition(i); if (firstOr) { firstOr = false; } else { @@ -3614,7 +3610,7 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole * Add partition for Transactional tables needs to add (copy/rename) the data so that it lands * in a delta_x_x/ folder in the partition dir. 
*/ - private void handleTransactionalTable(Table tab, AddPartitionDesc addPartitionDesc, + private void handleTransactionalTable(Table tab, AlterTableAddPartitionDesc addPartitionDesc, Task ddlTask) throws SemanticException { if(!AcidUtils.isTransactionalTable(tab)) { return; @@ -3623,7 +3619,7 @@ private void handleTransactionalTable(Table tab, AddPartitionDesc addPartitionDe int stmtId = 0; for (int index = 0; index < addPartitionDesc.getPartitionCount(); index++) { - OnePartitionDesc desc = addPartitionDesc.getPartition(index); + PartitionDesc desc = addPartitionDesc.getPartition(index); if (desc.getLocation() != null) { AcidUtils.validateAcidPartitionLocation(desc.getLocation(), conf); if(addPartitionDesc.isIfNotExists()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 38d9940bd0..4225b7bb86 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLWork2; import org.apache.hadoop.hive.ql.ddl.table.creation.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -55,10 +56,8 @@ import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType; @@ -327,11 +326,11 @@ public static boolean prepareImport(boolean isImportCmd, tblDesc.setTableName(parsedTableName); } - List partitionDescs = new ArrayList<>(); + List partitionDescs = new ArrayList<>(); Iterable partitions = rv.getPartitions(); for (Partition partition : partitions) { // TODO: this should ideally not create AddPartitionDesc per partition - AddPartitionDesc partsDesc = + AlterTableAddPartitionDesc partsDesc = getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition, replicationSpec, x.getConf()); partitionDescs.add(partsDesc); @@ -341,9 +340,9 @@ public static boolean prepareImport(boolean isImportCmd, // The import specification asked for only a particular partition to be loaded // We load only that, and ignore all the others. 
boolean found = false; - for (Iterator partnIter = partitionDescs + for (Iterator partnIter = partitionDescs .listIterator(); partnIter.hasNext(); ) { - AddPartitionDesc addPartitionDesc = partnIter.next(); + AlterTableAddPartitionDesc addPartitionDesc = partnIter.next(); if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(parsedPartSpec)) { found = true; } else { @@ -363,7 +362,7 @@ public static boolean prepareImport(boolean isImportCmd, throw new SemanticException(ErrorMsg.NEED_TABLE_SPECIFICATION.getMsg()); } else { x.getConf().set("import.destination.table", tblDesc.getTableName()); - for (AddPartitionDesc addPartitionDesc : partitionDescs) { + for (AlterTableAddPartitionDesc addPartitionDesc : partitionDescs) { addPartitionDesc.setTableName(tblDesc.getTableName()); } } @@ -411,14 +410,14 @@ public static boolean prepareImport(boolean isImportCmd, return tableExists; } - private static AddPartitionDesc getBaseAddPartitionDescFromPartition( + private static AlterTableAddPartitionDesc getBaseAddPartitionDescFromPartition( Path fromPath, String dbName, ImportTableDesc tblDesc, Partition partition, ReplicationSpec replicationSpec, HiveConf conf) throws MetaException, SemanticException { - AddPartitionDesc partsDesc = new AddPartitionDesc(dbName, tblDesc.getTableName(), + AlterTableAddPartitionDesc partsDesc = new AlterTableAddPartitionDesc(dbName, tblDesc.getTableName(), EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()), partition.getSd().getLocation(), partition.getParameters()); - AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0); + AlterTableAddPartitionDesc.PartitionDesc partDesc = partsDesc.getPartition(0); partDesc.setInputFormat(partition.getSd().getInputFormat()); partDesc.setOutputFormat(partition.getSd().getOutputFormat()); partDesc.setNumBuckets(partition.getSd().getNumBuckets()); @@ -565,28 +564,28 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, } private static Task alterSinglePartition( - ImportTableDesc tblDesc, Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, + ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn, EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, IOException, HiveException { addPartitionDesc.setReplaceMode(true); if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())) { addPartitionDesc.setReplicationSpec(replicationSpec); } - AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0); + AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartition(0); if (ptn == null) { fixLocationInPartSpec(tblDesc, table, wh, replicationSpec, partSpec, x); } else if (!externalTablePartition(tblDesc, replicationSpec)) { partSpec.setLocation(ptn.getLocation()); // use existing location } - return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf()); + return TaskFactory.get(new DDLWork2(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf()); } private static Task addSinglePartition(ImportTableDesc tblDesc, - Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, + Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId) throws MetaException, IOException, HiveException { - AddPartitionDesc.OnePartitionDesc 
partSpec = addPartitionDesc.getPartition(0);
+    AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartition(0);
     boolean isAutoPurge = false;
     boolean needRecycle = false;
     boolean copyToMigratedTxnTable = replicationSpec.isMigratingToTxnTable();
@@ -598,7 +597,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName,
       // addPartitionDesc already has the right partition location
       @SuppressWarnings("unchecked")
       Task addPartTask = TaskFactory.get(
-          new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
+          new DDLWork2(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
       return addPartTask;
     } else {
       String srcLocation = partSpec.getLocation();
@@ -658,7 +657,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName,
         // the partition/s to be already added or altered by previous events. So no need to
         // create add partition event again.
         addPartTask = TaskFactory.get(
-            new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
+            new DDLWork2(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
       }

       MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(),
@@ -729,7 +728,7 @@ private static boolean shouldSkipDataCopyInReplScope(ImportTableDesc tblDesc, Re
    * Helper method to set location properly in partSpec
    */
   private static void fixLocationInPartSpec(ImportTableDesc tblDesc, Table table,
-      Warehouse wh, ReplicationSpec replicationSpec, AddPartitionDesc.OnePartitionDesc partSpec,
+      Warehouse wh, ReplicationSpec replicationSpec, AlterTableAddPartitionDesc.PartitionDesc partSpec,
       EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, HiveException, IOException {
     if (externalTablePartition(tblDesc, replicationSpec)) {
       /*
@@ -1030,7 +1029,7 @@ private static String checkParams(Map map1,
    * @param wh
    */
   private static void createRegularImportTasks(
-      ImportTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet,
+      ImportTableDesc tblDesc, List<AlterTableAddPartitionDesc> partitionDescs, boolean isPartSpecSet,
       ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh,
       EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId)
       throws HiveException, IOException, MetaException {
@@ -1039,7 +1038,7 @@ private static void createRegularImportTasks(
     if (table.isPartitioned()) {
       x.getLOG().debug("table partitioned");
-      for (AddPartitionDesc addPartitionDesc : partitionDescs) {
+      for (AlterTableAddPartitionDesc addPartitionDesc : partitionDescs) {
         Map partSpec = addPartitionDesc.getPartition(0).getPartSpec();
         org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
         if ((ptn = x.getHive().getPartition(table, partSpec, false)) == null) {
@@ -1073,7 +1072,7 @@ private static void createRegularImportTasks(
       x.getOutputs().add(new WriteEntity(parentDb, WriteEntity.WriteType.DDL_SHARED));
       if (isPartitioned(tblDesc)) {
-        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
+        for (AlterTableAddPartitionDesc addPartitionDesc : partitionDescs) {
           t.addDependentTask(addSinglePartition(tblDesc, table, wh, addPartitionDesc,
             replicationSpec, x, writeId, stmtId));
         }
@@ -1122,7 +1121,7 @@ private static Table createNewTableMetadataObject(ImportTableDesc tblDesc, boole
    */
   private static void createReplImportTasks(
       ImportTableDesc tblDesc,
-      List<AddPartitionDesc> partitionDescs,
+      List<AlterTableAddPartitionDesc> partitionDescs,
       ReplicationSpec replicationSpec, boolean waitOnPrecursor,
       Table table, URI fromURI, Warehouse wh,
       EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId,
@@ -1226,7 +1225,7 @@ private static void createReplImportTasks(
     List<Task<? extends Serializable>> dependentTasks = null;
     if (isPartitioned(tblDesc)) {
       dependentTasks = new ArrayList<>(partitionDescs.size());
-      for (AddPartitionDesc addPartitionDesc : partitionDescs) {
+      for (AlterTableAddPartitionDesc addPartitionDesc : partitionDescs) {
         addPartitionDesc.setReplicationSpec(replicationSpec);
         if (!replicationSpec.isMetadataOnly()) {
           dependentTasks.add(addSinglePartition(tblDesc, table, wh, addPartitionDesc,
@@ -1289,7 +1288,7 @@ private static void createReplImportTasks(
       // Table existed, and is okay to replicate into, not dropping and re-creating.
       if (isPartitioned(tblDesc)) {
         x.getLOG().debug("table partitioned");
-        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
+        for (AlterTableAddPartitionDesc addPartitionDesc : partitionDescs) {
           addPartitionDesc.setReplicationSpec(replicationSpec);
           Map partSpec = addPartitionDesc.getPartition(0).getPartSpec();
           org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
index 5e88b6ebae..98a4b71781 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
@@ -18,13 +18,13 @@
 package org.apache.hadoop.hive.ql.parse.repl.load.message;

 import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

 import java.io.Serializable;
@@ -43,10 +43,10 @@
     Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
         ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions());
     if (partSpecs.size() > 0) {
-      DropPartitionDesc dropPtnDesc = new DropPartitionDesc(actualDbName + "." + actualTblName, partSpecs, true,
-          context.eventOnlyReplicationSpec());
-      Task dropPtnTask = TaskFactory.get(
-          new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf
+      AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(actualDbName + "."
+          + actualTblName,
+          partSpecs, true, context.eventOnlyReplicationSpec());
+      Task dropPtnTask = TaskFactory.get(
+          new DDLWork2(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf
       );
       context.log.debug("Added drop ptn task : {}:{},{}",
           dropPtnTask.getId(), dropPtnDesc.getTableName(), msg.getPartitions());
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
index 9c66210e70..2c30641254 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
@@ -20,13 +20,13 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableRenamePartitionDesc;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;

 import java.io.Serializable;
 import java.util.Iterator;
@@ -60,11 +60,11 @@
       replicationSpec.setMigratingToTxnTable();
     }

-    RenamePartitionDesc renamePtnDesc = new RenamePartitionDesc(
+    AlterTableRenamePartitionDesc renamePtnDesc = new AlterTableRenamePartitionDesc(
         tableName, oldPartSpec, newPartSpec, replicationSpec, null);
     renamePtnDesc.setWriteId(msg.getWriteId());
-    Task renamePtnTask = TaskFactory.get(
-        new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc), context.hiveConf);
+    Task renamePtnTask = TaskFactory.get(
+        new DDLWork2(readEntitySet, writeEntitySet, renamePtnDesc), context.hiveConf);
     context.log.debug("Added rename ptn task : {}:{}->{}",
         renamePtnTask.getId(), oldPartSpec, newPartSpec);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, newPartSpec);
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableAlterPartDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableAlterPartDesc.java
deleted file mode 100644
index 652c007643..0000000000
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableAlterPartDesc.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.hadoop.hive.ql.plan; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; - -public class AlterTableAlterPartDesc extends DDLDesc implements DDLDesc.DDLDescWithWriteId { - private String fqTableName; - private FieldSchema partKeySpec; - private long writeId; - - public AlterTableAlterPartDesc() { - } - - /** - * @param fqTableName - * table containing the partition - * @param partKeySpec - */ - public AlterTableAlterPartDesc(String fqTableName, FieldSchema partKeySpec) { - super(); - this.fqTableName = fqTableName; - this.partKeySpec = partKeySpec; - } - - public String getTableName() { - return fqTableName; - } - - public void setTableName(String tableName) { - this.fqTableName = tableName; - } - - public FieldSchema getPartKeySpec() { - return partKeySpec; - } - - public void setPartKeySpec(FieldSchema partKeySpec) { - this.partKeySpec = partKeySpec; - } - - @Override - public void setWriteId(long writeId) { - this.writeId = writeId; - } - - @Override - public String getFullTableName() { - return fqTableName; - } - - @Override - public boolean mayNeedWriteId() { - return true; // Checked before setting as the acid desc. - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 0505e07db4..6cd84bb8ab 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -34,16 +34,10 @@ // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates. private InsertCommitHookDesc insertCommitHookDesc; - private DropPartitionDesc dropPartitionDesc; private AlterTableDesc alterTblDesc; private ShowColumnsDesc showColumnsDesc; - private ShowPartitionsDesc showPartsDesc; - private AddPartitionDesc addPartitionDesc; - private RenamePartitionDesc renamePartitionDesc; private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; - private AlterTableAlterPartDesc alterTableAlterPartDesc; - private AlterTableExchangePartition alterTableExchangePartition; private ShowConfDesc showConfDesc; @@ -86,13 +80,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.alterTblDesc = alterTblDesc; } - public DDLWork(HashSet inputs, HashSet outputs, - DropPartitionDesc dropPartitionDesc) { - this(inputs, outputs); - - this.dropPartitionDesc = dropPartitionDesc; - } - /** * @param showColumnsDesc */ @@ -103,38 +90,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.showColumnsDesc = showColumnsDesc; } - /** - * @param showPartsDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowPartitionsDesc showPartsDesc) { - this(inputs, outputs); - - this.showPartsDesc = showPartsDesc; - } - - /** - * @param addPartitionDesc - * information about the partitions we want to add. - */ - public DDLWork(HashSet inputs, HashSet outputs, - AddPartitionDesc addPartitionDesc) { - this(inputs, outputs); - - this.addPartitionDesc = addPartitionDesc; - } - - /** - * @param renamePartitionDesc - * information about the partitions we want to add. 
- */ - public DDLWork(HashSet inputs, HashSet outputs, - RenamePartitionDesc renamePartitionDesc) { - this(inputs, outputs); - - this.renamePartitionDesc = renamePartitionDesc; - } - /** * @param inputs * @param outputs @@ -160,18 +115,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.mergeFilesDesc = mergeDesc; } - public DDLWork(HashSet inputs, HashSet outputs, - AlterTableAlterPartDesc alterPartDesc) { - this(inputs, outputs); - this.alterTableAlterPartDesc = alterPartDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, - AlterTableExchangePartition alterTableExchangePartition) { - this(inputs, outputs); - this.alterTableExchangePartition = alterTableExchangePartition; - } - public DDLWork(HashSet inputs, HashSet outputs, CacheMetadataDesc cacheMetadataDesc) { this(inputs, outputs); @@ -191,14 +134,6 @@ public DDLWork(HashSet inputs, HashSet outputs, this.replSetFirstIncLoadFlagDesc = replSetFirstIncLoadFlagDesc; } - /** - * @return the dropTblDesc - */ - @Explain(displayName = "Drop Partition Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public DropPartitionDesc getDropPartitionDesc() { - return dropPartitionDesc; - } - /** * @return the alterTblDesc */ @@ -215,29 +150,6 @@ public ShowColumnsDesc getShowColumnsDesc() { return showColumnsDesc; } - /** - * @return the showPartsDesc - */ - @Explain(displayName = "Show Partitions Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowPartitionsDesc getShowPartsDesc() { - return showPartsDesc; - } - - /** - * @return information about the partitions we want to add. - */ - @Explain(displayName = "Add Partition Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public AddPartitionDesc getAddPartitionDesc() { - return addPartitionDesc; - } - - /** - * @return information about the partitions we want to rename. - */ - public RenamePartitionDesc getRenamePartitionDesc() { - return renamePartitionDesc; - } - /** * @return information about the table/partitions we want to alter. */ @@ -275,20 +187,6 @@ public void setNeedLock(boolean needLock) { this.needLock = needLock; } - /** - * @return information about the partitions we want to change. - */ - public AlterTableAlterPartDesc getAlterTableAlterPartDesc() { - return alterTableAlterPartDesc; - } - - /** - * @return information about the table partition to be exchanged - */ - public AlterTableExchangePartition getAlterTableExchangePartition() { - return this.alterTableExchangePartition; - } - /** * @return information about the metadata to be cached */ diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java deleted file mode 100644 index b4edbfe633..0000000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.plan; - -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.ReplicationSpec; -import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; - -import java.io.Serializable; -import java.util.LinkedHashMap; -import java.util.Map; - -/** - * Contains the information needed to rename a partition. - */ -public class RenamePartitionDesc extends DDLDesc implements Serializable, DDLDescWithWriteId { - - private static final long serialVersionUID = 1L; - - private String tableName; - private String location; - private LinkedHashMap oldPartSpec; - private LinkedHashMap newPartSpec; - private ReplicationSpec replicationSpec; - private String fqTableName; - private long writeId; - - /** - * For serialization only. - */ - public RenamePartitionDesc() { - } - - /** - * @param tableName - * table to add to. - * @param oldPartSpec - * old partition specification. - * @param newPartSpec - * new partition specification. - * @param table - */ - public RenamePartitionDesc(String tableName, Map oldPartSpec, - Map newPartSpec, ReplicationSpec replicationSpec, Table table) { - this.tableName = tableName; - this.oldPartSpec = new LinkedHashMap(oldPartSpec); - this.newPartSpec = new LinkedHashMap(newPartSpec); - this.replicationSpec = replicationSpec; - this.fqTableName = table != null ? (table.getDbName() + "." + table.getTableName()) : tableName; - } - - /** - * @return the table we're going to add the partitions to. - */ - public String getTableName() { - return tableName; - } - - /** - * @return location of partition in relation to table - */ - public String getLocation() { - return location; - } - - /** - * @param location - * location of partition in relation to table - */ - public void setLocation(String location) { - this.location = location; - } - - /** - * @return old partition specification. - */ - public LinkedHashMap getOldPartSpec() { - return oldPartSpec; - } - - /** - * @param partSpec - * partition specification - */ - public void setOldPartSpec(LinkedHashMap partSpec) { - this.oldPartSpec = partSpec; - } - - /** - * @return new partition specification. - */ - public LinkedHashMap getNewPartSpec() { - return newPartSpec; - } - - /** - * @param partSpec - * partition specification - */ - public void setNewPartSpec(LinkedHashMap partSpec) { - this.newPartSpec = partSpec; - } - - /** - * @return what kind of replication scope this rename is running under. - * This can result in a "RENAME IF NEWER THAN" kind of semantic - */ - public ReplicationSpec getReplicationSpec() { return this.replicationSpec; } - - @Override - public void setWriteId(long writeId) { - this.writeId = writeId; - } - - public long getWriteId() { return writeId; } - - @Override - public String getFullTableName() { - return fqTableName; - } - - @Override - public boolean mayNeedWriteId() { - return true; // The check is done when setting this as the ACID DDLDesc. 
- } -} diff --git ql/src/test/queries/clientpositive/alter_partition_change_col.q ql/src/test/queries/clientpositive/alter_partition_change_col.q index c207731208..6f1f110d7a 100644 --- ql/src/test/queries/clientpositive/alter_partition_change_col.q +++ ql/src/test/queries/clientpositive/alter_partition_change_col.q @@ -34,6 +34,7 @@ select * from alter_partition_change_col1 where p1='abc'; select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__'; -- change the comment on a partition column without changing type or renaming it +explain alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1'); alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1'); describe alter_partition_change_col1; diff --git ql/src/test/queries/clientpositive/alter_rename_partition.q ql/src/test/queries/clientpositive/alter_rename_partition.q index b6f6ccce3b..59008ee893 100644 --- ql/src/test/queries/clientpositive/alter_rename_partition.q +++ ql/src/test/queries/clientpositive/alter_rename_partition.q @@ -33,6 +33,7 @@ CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, INSERT OVERWRITE TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') SELECT col1 FROM alter_rename_partition_src ; SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:'; +EXPLAIN ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:'); ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:'); SHOW PARTITIONS alter_rename_partition; SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:'; diff --git ql/src/test/queries/clientpositive/drop_partitions_filter.q ql/src/test/queries/clientpositive/drop_partitions_filter.q index 5862753b23..c402a3b062 100644 --- ql/src/test/queries/clientpositive/drop_partitions_filter.q +++ ql/src/test/queries/clientpositive/drop_partitions_filter.q @@ -1,6 +1,7 @@ create table ptestfilter_n1 (a string, b int) partitioned by (c string, d string); describe ptestfilter_n1; +explain alter table ptestfilter_n1 add partition (c='US', d=1); alter table ptestfilter_n1 add partition (c='US', d=1); alter table ptestfilter_n1 add partition (c='US', d=2); alter table ptestFilter_n1 add partition (c='Uganda', d=2); @@ -12,7 +13,9 @@ alter table ptestfilter_n1 add partition (c='India', d=3); alter table ptestfilter_n1 add partition (c='France', d=4); show partitions ptestfilter_n1; +explain alter table ptestfilter_n1 drop partition (c='US', d<'2'); alter table ptestfilter_n1 drop partition (c='US', d<'2'); +explain show partitions ptestfilter_n1; show partitions ptestfilter_n1; alter table ptestfilter_n1 drop partition (c>='US', d<='2'); @@ -21,6 +24,8 @@ show partitions ptestfilter_n1; alter table ptestfilter_n1 drop partition (c >'India'); show partitions ptestfilter_n1; +explain alter table ptestfilter_n1 drop partition (c >='India'), + partition (c='Greece', d='2'); alter table ptestfilter_n1 drop partition (c >='India'), partition (c='Greece', d='2'); show partitions ptestfilter_n1; diff --git ql/src/test/queries/clientpositive/exchgpartition2lel.q ql/src/test/queries/clientpositive/exchgpartition2lel.q index 567ff8a0bc..63d3c80258 100644 --- ql/src/test/queries/clientpositive/exchgpartition2lel.q +++ 
ql/src/test/queries/clientpositive/exchgpartition2lel.q @@ -27,6 +27,7 @@ ALTER TABLE t4_n7 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3_n15; SELECT * FROM t3_n15; SELECT * FROM t4_n7; +EXPLAIN ALTER TABLE t6_n2 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5_n3; ALTER TABLE t6_n2 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5_n3; SELECT * FROM t5_n3; SELECT * FROM t6_n2; diff --git ql/src/test/queries/clientpositive/show_partitions.q ql/src/test/queries/clientpositive/show_partitions.q index d22c483fea..f919db8dfb 100644 --- ql/src/test/queries/clientpositive/show_partitions.q +++ ql/src/test/queries/clientpositive/show_partitions.q @@ -20,7 +20,9 @@ ALTER TABLE srcpart ADD PARTITION (ds='4', hr='4'); ALTER TABLE srcpart ADD PARTITION (ds='4', hr='5'); -- from db1 to default db +EXPLAIN SHOW PARTITIONS default.srcpart PARTITION(hr='11'); SHOW PARTITIONS default.srcpart PARTITION(hr='11'); +EXPLAIN SHOW PARTITIONS default.srcpart PARTITION(ds='2008-04-08', hr='12'); SHOW PARTITIONS default.srcpart PARTITION(ds='2008-04-08', hr='12'); -- from db1 to db1 @@ -30,4 +32,4 @@ SHOW PARTITIONS srcpart PARTITION(ds='3', hr='3'); use default; -- from default to db1 SHOW PARTITIONS db1.srcpart PARTITION(ds='4'); -SHOW PARTITIONS db1.srcpart PARTITION(ds='3', hr='3'); \ No newline at end of file +SHOW PARTITIONS db1.srcpart PARTITION(ds='3', hr='3'); diff --git ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out index 95da2195e9..a744a94f53 100644 --- ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out +++ ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out @@ -15,4 +15,4 @@ POSTHOOK: Input: default@part_whitelist_test PREHOOK: query: ALTER TABLE part_whitelist_test ADD PARTITION (ds='1,2,3,4') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@part_whitelist_test -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern)) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern)) diff --git ql/src/test/results/clientnegative/addpart1.q.out ql/src/test/results/clientnegative/addpart1.q.out index e1255bc0f9..61fbc57f95 100644 --- ql/src/test/results/clientnegative/addpart1.q.out +++ ql/src/test/results/clientnegative/addpart1.q.out @@ -23,4 +23,4 @@ b=f/c=s PREHOOK: query: alter table addpart1 add partition (b='f', c='') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@addpart1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. partition spec is invalid; field c does not exist or is empty +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. 
partition spec is invalid; field c does not exist or is empty diff --git ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out index ae265eb6c9..4049cc7415 100644 --- ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out +++ ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out @@ -23,4 +23,4 @@ PREHOOK: query: ALTER TABLE part_whitelist_test PARTITION (ds='1') rename to par PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@part_whitelist_test PREHOOK: Output: default@part_whitelist_test@ds=1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern) diff --git ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out index 486ab8cdb0..0dddbc1c4d 100644 --- ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out +++ ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out @@ -34,4 +34,4 @@ POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1,pcol2=old_pa PREHOOK: query: alter table alter_rename_partition partition (pCol1='nonexist_part1', pcol2='nonexist_part2') rename to partition (pCol1='new_part1', pcol2='new_part2') PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@alter_rename_partition -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist. diff --git ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out index acb66acd43..566ac6fd12 100644 --- ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out +++ ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out @@ -35,4 +35,4 @@ PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:' PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@alter_rename_partition PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:] +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Unable to rename partition. 
Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:] diff --git ql/src/test/results/clientnegative/exchange_partition.q.out ql/src/test/results/clientnegative/exchange_partition.q.out index bfdf413d89..42ca82b86e 100644 --- ql/src/test/results/clientnegative/exchange_partition.q.out +++ ql/src/test/results/clientnegative/exchange_partition.q.out @@ -53,4 +53,4 @@ PREHOOK: query: ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TAB PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION PREHOOK: Input: default@ex_table2 PREHOOK: Output: default@ex_table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.) diff --git ql/src/test/results/clientnegative/external2.q.out ql/src/test/results/clientnegative/external2.q.out index 05ddc28820..88a3bb8de0 100644 --- ql/src/test/results/clientnegative/external2.q.out +++ ql/src/test/results/clientnegative/external2.q.out @@ -10,4 +10,4 @@ POSTHOOK: Output: default@external2 PREHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### PREHOOK: Output: default@external2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme" +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme" diff --git ql/src/test/results/clientpositive/add_part_multiple.q.out ql/src/test/results/clientpositive/add_part_multiple.q.out index 7631c4d02f..81454f7dc5 100644 --- ql/src/test/results/clientpositive/add_part_multiple.q.out +++ ql/src/test/results/clientpositive/add_part_multiple.q.out @@ -29,9 +29,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Add Partition Operator: + Add Partition #### A masked pattern was here #### - Spec: {ds=2010-01-01}, {ds=2010-02-01}, {ds=2010-03-01}, {ds=2010-04-01} + Spec: {ds=2010-01-01}, {ds=2010-02-01}, {ds=2010-03-01}, {ds=2010-04-01} PREHOOK: query: ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS PARTITION (ds='2010-01-01') location 'A' diff --git ql/src/test/results/clientpositive/alter_partition_change_col.q.out ql/src/test/results/clientpositive/alter_partition_change_col.q.out index d330026392..9a5ac432ac 100644 --- ql/src/test/results/clientpositive/alter_partition_change_col.q.out +++ ql/src/test/results/clientpositive/alter_partition_change_col.q.out @@ -253,6 +253,22 @@ Snow 56.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 235.0000 __HIVE_DEFAULT_PARTITION__ 123 +PREHOOK: query: explain alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1') +PREHOOK: type: ALTERTABLE_PARTCOLTYPE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: explain alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1') +POSTHOOK: type: ALTERTABLE_PARTCOLTYPE +POSTHOOK: Input: default@alter_partition_change_col1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Alter Partition + partition key 
name: p1 + partition key type: string + table: default.alter_partition_change_col1 + PREHOOK: query: alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1') PREHOOK: type: ALTERTABLE_PARTCOLTYPE PREHOOK: Input: default@alter_partition_change_col1 diff --git ql/src/test/results/clientpositive/alter_rename_partition.q.out ql/src/test/results/clientpositive/alter_rename_partition.q.out index fc7d750b3b..884a321f7d 100644 --- ql/src/test/results/clientpositive/alter_rename_partition.q.out +++ ql/src/test/results/clientpositive/alter_rename_partition.q.out @@ -179,6 +179,28 @@ POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part 4 old_part1: old_part2: 5 old_part1: old_part2: 6 old_part1: old_part2: +PREHOOK: query: EXPLAIN ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: alter_rename_partition_db@alter_rename_partition +PREHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +POSTHOOK: query: EXPLAIN ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition +POSTHOOK: Output: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Rename Partition + new partitions: + pcol1 new_part1: + pcol2 new_part2: + old partitions: + pcol1 old_part1: + pcol2 old_part2: + table: alter_rename_partition_db.alter_rename_partition + PREHOOK: query: ALTER TABLE alter_rename_partition PARTITION (pCol1='old_part1:', pcol2='old_part2:') RENAME TO PARTITION (pCol1='new_part1:', pcol2='new_part2:') PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: alter_rename_partition_db@alter_rename_partition diff --git ql/src/test/results/clientpositive/drop_deleted_partitions.q.out ql/src/test/results/clientpositive/drop_deleted_partitions.q.out index e2c4443055..bc2e19e5b3 100644 --- ql/src/test/results/clientpositive/drop_deleted_partitions.q.out +++ ql/src/test/results/clientpositive/drop_deleted_partitions.q.out @@ -42,9 +42,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Drop Partition Operator: - Drop Partition - table: dmp.mp + Drop Partition + table: dmp.mp PREHOOK: query: alter table dmp.mp drop partition (b='1') PREHOOK: type: ALTERTABLE_DROPPARTS diff --git ql/src/test/results/clientpositive/drop_multi_partitions.q.out ql/src/test/results/clientpositive/drop_multi_partitions.q.out index 53978e8cc9..559aca1b3a 100644 --- ql/src/test/results/clientpositive/drop_multi_partitions.q.out +++ ql/src/test/results/clientpositive/drop_multi_partitions.q.out @@ -57,9 +57,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Drop Partition Operator: - Drop Partition - table: dmp.mp_n0 + Drop Partition + table: dmp.mp_n0 PREHOOK: query: alter table dmp.mp_n0 drop partition (b='1') PREHOOK: type: ALTERTABLE_DROPPARTS diff --git ql/src/test/results/clientpositive/drop_partitions_filter.q.out ql/src/test/results/clientpositive/drop_partitions_filter.q.out index 2cbc05da5e..457fa0bc2f 100644 --- ql/src/test/results/clientpositive/drop_partitions_filter.q.out +++ ql/src/test/results/clientpositive/drop_partitions_filter.q.out @@ -21,6 +21,21 @@ d 
string # col_name data_type comment c string d string +PREHOOK: query: explain alter table ptestfilter_n1 add partition (c='US', d=1) +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: explain alter table ptestfilter_n1 add partition (c='US', d=1) +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@ptestfilter_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Add Partition +#### A masked pattern was here #### + Spec: {c=US, d=1} + PREHOOK: query: alter table ptestfilter_n1 add partition (c='US', d=1) PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@ptestfilter_n1 @@ -99,6 +114,22 @@ c=Russia/d=3 c=US/d=1 c=US/d=2 c=Uganda/d=2 +PREHOOK: query: explain alter table ptestfilter_n1 drop partition (c='US', d<'2') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1@c=US/d=1 +POSTHOOK: query: explain alter table ptestfilter_n1 drop partition (c='US', d<'2') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=US/d=1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop Partition + table: default.ptestfilter_n1 + PREHOOK: query: alter table ptestfilter_n1 drop partition (c='US', d<'2') PREHOOK: type: ALTERTABLE_DROPPARTS PREHOOK: Input: default@ptestfilter_n1 @@ -107,6 +138,27 @@ POSTHOOK: query: alter table ptestfilter_n1 drop partition (c='US', d<'2') POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@ptestfilter_n1 POSTHOOK: Output: default@ptestfilter_n1@c=US/d=1 +PREHOOK: query: explain show partitions ptestfilter_n1 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: explain show partitions ptestfilter_n1 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@ptestfilter_n1 +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + table: ptestfilter_n1 + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@ptestfilter_n1 @@ -162,6 +214,26 @@ c=France/d=4 c=Germany/d=2 c=Greece/d=2 c=India/d=3 +PREHOOK: query: explain alter table ptestfilter_n1 drop partition (c >='India'), + partition (c='Greece', d='2') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1@c=Greece/d=2 +PREHOOK: Output: default@ptestfilter_n1@c=India/d=3 +POSTHOOK: query: explain alter table ptestfilter_n1 drop partition (c >='India'), + partition (c='Greece', d='2') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Greece/d=2 +POSTHOOK: Output: default@ptestfilter_n1@c=India/d=3 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Drop Partition + table: default.ptestfilter_n1 + PREHOOK: query: alter table ptestfilter_n1 drop partition (c >='India'), partition (c='Greece', d='2') PREHOOK: type: ALTERTABLE_DROPPARTS diff --git ql/src/test/results/clientpositive/llap/exchgpartition2lel.q.out ql/src/test/results/clientpositive/llap/exchgpartition2lel.q.out index 2ec3e1a4e7..45e6b0a280 100644 --- ql/src/test/results/clientpositive/llap/exchgpartition2lel.q.out +++ ql/src/test/results/clientpositive/llap/exchgpartition2lel.q.out @@ -171,6 +171,25 @@ 
POSTHOOK: Input: default@t4_n7 POSTHOOK: Input: default@t4_n7@d1=1/d2=1 #### A masked pattern was here #### 100 1 1 +PREHOOK: query: EXPLAIN ALTER TABLE t6_n2 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5_n3 +PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION +PREHOOK: Input: default@t5_n3 +PREHOOK: Output: default@t6_n2 +POSTHOOK: query: EXPLAIN ALTER TABLE t6_n2 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5_n3 +POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION +POSTHOOK: Input: default@t5_n3 +POSTHOOK: Output: default@t6_n2 +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Exchange Partitions + partitions: + d1 1 + d2 1 + d3 1 + PREHOOK: query: ALTER TABLE t6_n2 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5_n3 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION PREHOOK: Input: default@t5_n3 diff --git ql/src/test/results/clientpositive/show_partitions.q.out ql/src/test/results/clientpositive/show_partitions.q.out index 8b7473a182..0a73374987 100644 --- ql/src/test/results/clientpositive/show_partitions.q.out +++ ql/src/test/results/clientpositive/show_partitions.q.out @@ -115,6 +115,29 @@ POSTHOOK: query: ALTER TABLE srcpart ADD PARTITION (ds='4', hr='5') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: db1@srcpart POSTHOOK: Output: db1@srcpart@ds=4/hr=5 +PREHOOK: query: EXPLAIN SHOW PARTITIONS default.srcpart PARTITION(hr='11') +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@srcpart +POSTHOOK: query: EXPLAIN SHOW PARTITIONS default.srcpart PARTITION(hr='11') +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@srcpart +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + partSpec: + hr 11 + table: default.srcpart + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW PARTITIONS default.srcpart PARTITION(hr='11') PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart @@ -123,6 +146,30 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 ds=2008-04-09/hr=11 +PREHOOK: query: EXPLAIN SHOW PARTITIONS default.srcpart PARTITION(ds='2008-04-08', hr='12') +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@srcpart +POSTHOOK: query: EXPLAIN SHOW PARTITIONS default.srcpart PARTITION(ds='2008-04-08', hr='12') +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@srcpart +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Show Partitions + partSpec: + ds 2008-04-08 + hr 12 + table: default.srcpart + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SHOW PARTITIONS default.srcpart PARTITION(ds='2008-04-08', hr='12') PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart diff --git ql/src/test/results/clientpositive/showparts.q.out ql/src/test/results/clientpositive/showparts.q.out index ddb44b64e7..662b7a2fab 100644 --- ql/src/test/results/clientpositive/showparts.q.out +++ ql/src/test/results/clientpositive/showparts.q.out @@ -12,9 +12,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Show Partitions Operator: - Show Partitions - table: srcpart + Show Partitions + table: srcpart Stage: Stage-1 Fetch Operator diff --git ql/src/test/results/clientpositive/spark/add_part_multiple.q.out ql/src/test/results/clientpositive/spark/add_part_multiple.q.out index 7631c4d02f..81454f7dc5 100644 --- 
ql/src/test/results/clientpositive/spark/add_part_multiple.q.out +++ ql/src/test/results/clientpositive/spark/add_part_multiple.q.out @@ -29,9 +29,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 - Add Partition Operator: + Add Partition #### A masked pattern was here #### - Spec: {ds=2010-01-01}, {ds=2010-02-01}, {ds=2010-03-01}, {ds=2010-04-01} + Spec: {ds=2010-01-01}, {ds=2010-02-01}, {ds=2010-03-01}, {ds=2010-04-01} PREHOOK: query: ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS PARTITION (ds='2010-01-01') location 'A' diff --git streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java index fa7e079331..659595df3d 100644 --- streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java +++ streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; +import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; import org.apache.thrift.TException; @@ -435,7 +435,7 @@ public PartitionInfo createPartitionIfNotExists(final List partitionValu try { Map partSpec = Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues); - AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true); + AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(database, table, true); partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues); partLocation = new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString(); addPartitionDesc.addPartition(partSpec, partLocation);
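
Illustrative sketch, not part of the patch: the DDLWork.java hunk above removes the per-operation fields and constructors (add/drop/rename/alter/exchange/show partitions), so callers no longer null-check a dedicated getter on DDLWork. Under the new model the single desc is the payload of a DDLWork2 and consumers branch on its type. A minimal sketch of that shape, assuming the getDDLDesc() accessor used elsewhere in this patch, a (inputs, outputs, desc) constructor, and HashSet-typed entity sets:

    // Sketch only -- entity-set types and the exact DDLWork2 signature are assumptions.
    import java.util.HashSet;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.ddl.DDLWork2;
    import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.exec.TaskFactory;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;

    final class DdlWork2Sketch {
      static Task<DDLWork2> addPartitionTask(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
          AlterTableAddPartitionDesc addPartitionDesc, HiveConf conf) {
        // One constructor for every operation: the desc itself identifies the DDL to run.
        return TaskFactory.get(new DDLWork2(inputs, outputs, addPartitionDesc), conf);
      }

      static boolean isAddPartition(DDLWork2 work) {
        // Consumers branch on the payload type instead of null-checking a per-operation getter.
        return work.getDDLDesc() instanceof AlterTableAddPartitionDesc;
      }
    }

This is why the explain golden files above lose the "... Operator" suffix: each operation is now rendered from its own desc rather than from a DDLWork field.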
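
Illustrative sketch, not part of the patch: DropPartitionHandler above now feeds AlterTableDropPartitionDesc with the expression-based partition specs produced by ReplUtils.genPartSpecs. The map's generic parameters and the meaning of the boolean (drop-if-exists) are assumptions inferred from the hunk, not confirmed by it:

    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

    final class DropPartitionDescSketch {
      static AlterTableDropPartitionDesc forReplication(String dbName, String tableName,
          Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs, ReplicationSpec replicationSpec) {
        // The handler passes a fully qualified "db.table" name plus partition filter
        // expressions grouped the way ReplUtils.genPartSpecs returns them; "true" is
        // taken to mean drop-if-exists (assumption).
        return new AlterTableDropPartitionDesc(dbName + "." + tableName, partSpecs, true, replicationSpec);
      }
    }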
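
Illustrative sketch, not part of the patch: the HiveStreamingConnection hunk above now builds AlterTableAddPartitionDesc directly before creating the partition. A self-contained sketch of just the desc construction, using only the calls visible in the hunk (Warehouse.makeSpecFromValues, Warehouse.makePartPath, addPartition); reading the boolean as IF NOT EXISTS is an assumption:

    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc;
    import org.apache.hadoop.hive.ql.metadata.Table;

    final class AddPartitionDescSketch {
      static AlterTableAddPartitionDesc describeNewPartition(Table tableObject, String database,
          String table, List<String> partitionValues) throws MetaException {
        // Turn the ordered partition values into a key->value spec, e.g. {ds=2010-01-01}.
        Map<String, String> partSpec =
            Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
        // Default location under the table directory, as the streaming code computes it.
        String partLocation =
            new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
        // "true" requests if-not-exists behaviour (assumption), matching the call sites in this patch.
        AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(database, table, true);
        addPartitionDesc.addPartition(partSpec, partLocation);
        return addPartitionDesc;
      }
    }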