diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index abd351d51a..8e5db2d8a9 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.ddl.table.info.ShowTablesDesc; import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; import org.apache.hadoop.hive.ql.ddl.table.partition.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.ddl.table.storage.AlterTableSetLocationDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -41,8 +42,6 @@ import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.ql.session.SessionState; @@ -334,44 +333,32 @@ protected void authorizeDDLWork2(HiveSemanticAnalyzerHookContext cntxt, Hive hiv ShowPartitionsDesc showParts = (ShowPartitionsDesc)ddlDesc; String tableName = extractTableName(showParts.getTabName()); authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); - } - } - - @Override - protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) - throws HiveException { - // TODO: add alter database support in HCat - - // Table operations. - AlterTableDesc alterTable = work.getAlterTblDesc(); - if (alterTable != null) { + } else if (ddlDesc instanceof AlterTableSetLocationDesc) { + AlterTableSetLocationDesc alterTable = (AlterTableSetLocationDesc)ddlDesc; Table table = hive.getTable(SessionState.get().getCurrentDatabase(), - Utilities.getDbTableName(alterTable.getOldName())[1], false); + Utilities.getDbTableName(alterTable.getTableName())[1], false); Partition part = null; - if (alterTable.getPartSpec() != null) { - part = hive.getPartition(table, alterTable.getPartSpec(), false); + if (alterTable.getPartitionSpec() != null) { + part = hive.getPartition(table, alterTable.getPartitionSpec(), false); } - String newLocation = alterTable.getNewLocation(); + String newLocation = alterTable.getLocation(); /* Hcat requires ALTER_DATA privileges for ALTER TABLE LOCATION statements * for the old table/partition location and the new location. 
*/ - if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) { - if (part != null) { - authorize(part, Privilege.ALTER_DATA); // authorize for the old - // location, and new location - part.setLocation(newLocation); - authorize(part, Privilege.ALTER_DATA); - } else { - authorize(table, Privilege.ALTER_DATA); // authorize for the old - // location, and new location - table.getTTable().getSd().setLocation(newLocation); - authorize(table, Privilege.ALTER_DATA); - } + if (part != null) { + authorize(part, Privilege.ALTER_DATA); // authorize for the old + // location, and new location + part.setLocation(newLocation); + authorize(part, Privilege.ALTER_DATA); + } else { + authorize(table, Privilege.ALTER_DATA); // authorize for the old + // location, and new location + table.getTTable().getSd().setLocation(newLocation); + authorize(table, Privilege.ALTER_DATA); } - //other alter operations are already supported by Hive } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java index 3ff04131d0..432779b3f4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java @@ -42,19 +42,20 @@ private final ReplicationSpec replicationSpec; private final boolean isCascade; private final boolean expectView; - - private Map props; + private final Map props; private Long writeId; public AbstractAlterTableDesc(AlterTableTypes type, String tableName, Map partitionSpec, - ReplicationSpec replicationSpec, boolean isCascade, boolean expectView) throws SemanticException { + ReplicationSpec replicationSpec, boolean isCascade, boolean expectView, Map props) + throws SemanticException { this.type = type; this.tableName = String.join(".", Utilities.getDbTableName(tableName)); this.partitionSpec = partitionSpec; this.replicationSpec = replicationSpec; this.isCascade = isCascade; this.expectView = expectView; + this.props = props; } public AlterTableTypes getType() { @@ -85,6 +86,7 @@ public boolean expectView() { return expectView; } + @Explain(displayName = "props", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public Map getProps() { return props; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java index 5d8cd94939..baf98da37a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java @@ -56,6 +56,8 @@ public AbstractAlterTableOperation(DDLOperationContext context, AbstractAlterTab this.desc = desc; } + protected EnvironmentContext environmentContext; + @Override public int execute() throws HiveException { if (!AlterTableUtils.allowOperationInReplicationScope(context.getDb(), desc.getTableName(), null, @@ -72,7 +74,7 @@ public int execute() throws HiveException { // Don't change the table object returned by the metastore, as we'll mess with it's caches. 
Table table = oldTable.copy(); - EnvironmentContext environmentContext = initializeEnvironmentContext(null); + environmentContext = initializeEnvironmentContext(null); if (partitions == null) { doAlteration(table, null); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java index ebd3bdbf4d..9babf2a1a9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableWithConstraintsDesc.java @@ -36,8 +36,8 @@ public AbstractAlterTableWithConstraintsDesc(AlterTableTypes type, String tableName, Map partitionSpec, ReplicationSpec replicationSpec, boolean isCascade, boolean expectView, - Constraints constraints) throws SemanticException { - super(type, tableName, partitionSpec, replicationSpec, isCascade, expectView); + Map props, Constraints constraints) throws SemanticException { + super(type, tableName, partitionSpec, replicationSpec, isCascade, expectView, props); this.constraints = constraints; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java index a15597c071..e40ba1819d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java @@ -44,7 +44,7 @@ public AlterTableAddColumnsDesc(String tableName, Map partitionSpec, boolean isCascade, List newColumns) throws SemanticException { - super(AlterTableTypes.ADD_COLUMNS, tableName, partitionSpec, null, isCascade, false); + super(AlterTableTypes.ADD_COLUMNS, tableName, partitionSpec, null, isCascade, false, null); this.newColumns = newColumns; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java index 827cc80d14..ce3b97eb68 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java @@ -49,7 +49,7 @@ public AlterTableChangeColumnDesc(String tableName, Map partitionSpec, boolean isCascade, Constraints constraints, String oldColumnName, String newColumnName, String newColumnType, String newColumnComment, boolean first, String afterColumn) throws SemanticException { - super(AlterTableTypes.RENAME_COLUMN, tableName, partitionSpec, null, isCascade, false, constraints); + super(AlterTableTypes.RENAME_COLUMN, tableName, partitionSpec, null, isCascade, false, null, constraints); this.oldColumnName = oldColumnName; this.newColumnName = newColumnName; diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java index 6947c1ed51..3975f6682a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java @@ -45,7 +45,7 @@ public AlterTableReplaceColumnsDesc(String tableName, Map partitionSpec, boolean isCascade, List newColumns) throws SemanticException { - super(AlterTableTypes.REPLACE_COLUMNS, tableName, partitionSpec, null, isCascade, false); + 
super(AlterTableTypes.REPLACE_COLUMNS, tableName, partitionSpec, null, isCascade, false, null); this.newColumns = newColumns; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java index 116fa2d28c..5722490feb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import avro.shaded.com.google.common.collect.ImmutableSet; +import com.google.common.collect.ImmutableSet; /** * Operation process of replacing two columns. diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java index f1a1ea12e5..18485c9a81 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java @@ -41,7 +41,7 @@ public AlterTableUpdateColumnsDesc(String tableName, Map partitionSpec, boolean isCascade) throws SemanticException { - super(AlterTableTypes.UPDATE_COLUMNS, tableName, partitionSpec, null, isCascade, false); + super(AlterTableTypes.UPDATE_COLUMNS, tableName, partitionSpec, null, isCascade, false, null); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java index cd8deab596..2077c7d7e6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java @@ -38,7 +38,7 @@ public AlterTableAddConstraintDesc(String tableName, ReplicationSpec replicationSpec, Constraints constraints) throws SemanticException { - super(AlterTableTypes.ADD_CONSTRAINT, tableName, null, replicationSpec, false, false, constraints); + super(AlterTableTypes.ADD_CONSTRAINT, tableName, null, replicationSpec, false, false, null, constraints); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java index 3ea7443787..74a799458e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/ShowCreateTableOperation.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.util.DirectionUtils; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hive.common.util.HiveStringUtils; import org.stringtemplate.v4.ST; @@ -178,12 +178,7 @@ private int showCreateTable(DataOutputStream outStream) throws HiveException { // Order List sortKeys = new ArrayList(); for (Order sortCol : sortCols) { - String sortKeyDesc = " " + sortCol.getCol() + " "; - if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) { - sortKeyDesc = 
sortKeyDesc + "ASC"; - } else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { - sortKeyDesc = sortKeyDesc + "DESC"; - } + String sortKeyDesc = " " + sortCol.getCol() + " " + DirectionUtils.codeToText(sortCol.getOrder()); sortKeys.add(sortKeyDesc); } tblSortBucket += StringUtils.join(sortKeys, ", \n"); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java new file mode 100644 index 0000000000..8aab47b840 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.ql.util.DirectionUtils; + +/** + * DDL task description for ALTER TABLE ... CLUSTERED BY ... SORTED BY ... [INTO ... BUCKETS] commands. 
+ */ +@Explain(displayName = "Clustered By", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableClusteredByDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableClusteredByDesc.class, AlterTableClusteredByOperation.class); + } + + private final int numberBuckets; + private final List bucketColumns; + private final List sortColumns; + + public AlterTableClusteredByDesc(String tableName, Map partitionSpec, int numberBuckets, + List bucketColumns, List sortColumns) throws SemanticException { + super(AlterTableTypes.CLUSTERED_BY, tableName, partitionSpec, null, false, false, null); + this.numberBuckets = numberBuckets; + this.bucketColumns = bucketColumns; + this.sortColumns = sortColumns; + } + + @Explain(displayName = "number of buckets", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public int getNumberBuckets() { + return numberBuckets; + } + + @Explain(displayName = "bucket columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List getBucketColumns() { + return bucketColumns; + } + + public List getSortColumns() { + return sortColumns; + } + + // Only for explaining + @Explain(displayName = "sort columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List getSortColumnsExplain() { + return sortColumns.stream() + .map(t -> t.getCol() + " " + DirectionUtils.codeToText(t.getOrder())) + .collect(Collectors.toList()); + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByOperation.java new file mode 100644 index 0000000000..bb5b2efbc1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByOperation.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of clustering a table by some column. 
+ */ +public class AlterTableClusteredByOperation extends AbstractAlterTableOperation { + private final AlterTableClusteredByDesc desc; + + public AlterTableClusteredByOperation(DDLOperationContext context, AlterTableClusteredByDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + // validate sort columns and bucket columns + List columns = Utilities.getColumnNamesFromFieldSchema(table.getCols()); + Utilities.validateColumnNames(columns, desc.getBucketColumns()); + if (desc.getSortColumns() != null) { + Utilities.validateColumnNames(columns, Utilities.getColumnNamesFromSortCols(desc.getSortColumns())); + } + + sd.setBucketCols(desc.getBucketColumns()); + sd.setNumBuckets(desc.getNumberBuckets()); + sd.setSortCols(desc.getSortColumns()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java new file mode 100644 index 0000000000..680f31096e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... INTO ... BUCKETS commands. 
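+ * + * For example (table name and bucket count are illustrative): + * ALTER TABLE t1 INTO 4 BUCKETS;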
+ */ +@Explain(displayName = "Into Buckets", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableIntoBucketsDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableIntoBucketsDesc.class, AlterTableIntoBucketsOperation.class); + } + + private final int numberOfBuckets; + + public AlterTableIntoBucketsDesc(String tableName, Map partitionSpec, int numberOfBuckets) + throws SemanticException { + super(AlterTableTypes.INTO_BUCKETS, tableName, partitionSpec, null, false, false, null); + this.numberOfBuckets = numberOfBuckets; + } + + @Explain(displayName = "number of buckets", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public int getNumberOfBuckets() { + return numberOfBuckets; + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsOperation.java new file mode 100644 index 0000000000..d9f48ccded --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsOperation.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of setting the number of buckets. 
+ */ +public class AlterTableIntoBucketsOperation extends AbstractAlterTableOperation { + private final AlterTableIntoBucketsDesc desc; + + public AlterTableIntoBucketsOperation(DDLOperationContext context, AlterTableIntoBucketsDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + if (partition != null) { + if (partition.getBucketCount() == desc.getNumberOfBuckets()) { + return; + } + partition.setBucketCount(desc.getNumberOfBuckets()); + } else { + if (table.getNumBuckets() == desc.getNumberOfBuckets()) { + return; + } + table.setNumBuckets(desc.getNumberOfBuckets()); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java new file mode 100644 index 0000000000..a335d0dbdc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... NOT CLUSTERED commands. + */ +@Explain(displayName = "Not Clustered", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableNotClusteredDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableNotClusteredDesc.class, AlterTableNotClusteredOperation.class); + } + + public AlterTableNotClusteredDesc(String tableName, Map partitionSpec) throws SemanticException { + super(AlterTableTypes.NOT_CLUSTERED, tableName, partitionSpec, null, false, false, null); + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredOperation.java new file mode 100644 index 0000000000..6cca228ddc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredOperation.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.ArrayList; + +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of turning off clustering. + */ +public class AlterTableNotClusteredOperation extends AbstractAlterTableOperation { + + public AlterTableNotClusteredOperation(DDLOperationContext context, AlterTableNotClusteredDesc desc) { + super(context, desc); + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + sd.setBucketCols(new ArrayList()); + sd.setNumBuckets(-1); // -1 buckets means to turn off bucketing + sd.setSortCols(new ArrayList()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java new file mode 100644 index 0000000000..af67964c73 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... NOT SKEWED commands. 
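+ * + * For example (table name is illustrative): + * ALTER TABLE t1 NOT SKEWED;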
+ */ +@Explain(displayName = "Not Skewed", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableNotSkewedDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableNotSkewedDesc.class, AlterTableNotSkewedOperation.class); + } + + public AlterTableNotSkewedDesc(String tableName) throws SemanticException { + super(AlterTableTypes.NOT_SKEWED, tableName, null, null, false, false, null); + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedOperation.java new file mode 100644 index 0000000000..ab39790c29 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedOperation.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of turning of skewing. + */ +public class AlterTableNotSkewedOperation extends AbstractAlterTableOperation { + + public AlterTableNotSkewedOperation(DDLOperationContext context, AlterTableNotSkewedDesc desc) { + super(context, desc); + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + // Validation's been done at compile time. no validation is needed here. + // Convert skewed table to non-skewed table. + if (table.getSkewedInfo() == null) { + table.setSkewedInfo(new SkewedInfo()); + } + table.setSkewedColNames(new ArrayList()); + table.setSkewedColValues(new ArrayList>()); + + table.setStoredAsSubDirectories(false); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java new file mode 100644 index 0000000000..11e8bf37eb --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... NOT SORTED commands. + */ +@Explain(displayName = "Not Sorted", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableNotSortedDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableNotSortedDesc.class, AlterTableNotSortedOperation.class); + } + + public AlterTableNotSortedDesc(String tableName, Map<String, String> partitionSpec) throws SemanticException { + super(AlterTableTypes.NOT_SORTED, tableName, partitionSpec, null, false, false, null); + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedOperation.java new file mode 100644 index 0000000000..fec0d1b941 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedOperation.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.ArrayList; + +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of turning off sorting. 
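+ * + * Triggered, for example, by (table name is illustrative): + * ALTER TABLE t1 NOT SORTED;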
+ */ +public class AlterTableNotSortedOperation extends AbstractAlterTableOperation { + + public AlterTableNotSortedOperation(DDLOperationContext context, AlterTableNotSortedDesc desc) { + super(context, desc); + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + sd.setSortCols(new ArrayList()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java new file mode 100644 index 0000000000..89bbb17aec --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... SET FILEFORMAT ... commands. 
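+ * + * For example (table name is illustrative): + * ALTER TABLE t1 SET FILEFORMAT ORC;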
+ */ +@Explain(displayName = "Set File Format", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableSetFileFormatDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableSetFileFormatDesc.class, AlterTableSetFileFormatOperation.class); + } + + private final String inputFormat; + private final String outputFormat; + private final String serdeName; + + public AlterTableSetFileFormatDesc(String tableName, Map partitionSpec, String inputFormat, + String outputFormat, String serdeName) throws SemanticException { + super(AlterTableTypes.SET_FILE_FORMAT, tableName, partitionSpec, null, false, false, null); + this.inputFormat = inputFormat; + this.outputFormat = outputFormat; + this.serdeName = serdeName; + } + + @Explain(displayName = "input format", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getInputFormat() { + return inputFormat; + } + + @Explain(displayName = "output format", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getOutputFormat() { + return outputFormat; + } + + @Explain(displayName = "serde name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getSerdeName() { + return serdeName; + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatOperation.java new file mode 100644 index 0000000000..bc1a2deb99 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatOperation.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableUtils; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of setting the file format. 
+ */ +public class AlterTableSetFileFormatOperation extends AbstractAlterTableOperation { + private final AlterTableSetFileFormatDesc desc; + + public AlterTableSetFileFormatOperation(DDLOperationContext context, AlterTableSetFileFormatDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + // if orc table, restrict changing the file format as it can break schema evolution + if (AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()) && + sd.getInputFormat().equals(OrcInputFormat.class.getName()) + && !desc.getInputFormat().equals(OrcInputFormat.class.getName())) { + throw new HiveException(ErrorMsg.CANNOT_CHANGE_FILEFORMAT, "ORC", desc.getTableName()); + } + + sd.setInputFormat(desc.getInputFormat()); + sd.setOutputFormat(desc.getOutputFormat()); + if (desc.getSerdeName() != null) { + sd.getSerdeInfo().setSerializationLib(desc.getSerdeName()); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java new file mode 100644 index 0000000000..c918bb9870 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... SET LOCATION ... commands. 
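+ * + * For example (table name and path are illustrative): + * ALTER TABLE t1 SET LOCATION 'hdfs:///tmp/new_location';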
+ */ +@Explain(displayName = "Set Location", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableSetLocationDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableSetLocationDesc.class, AlterTableSetLocationOperation.class); + } + + private final String location; + + public AlterTableSetLocationDesc(String tableName, Map partitionSpec, String location) + throws SemanticException { + super(AlterTableTypes.SET_LOCATION, tableName, partitionSpec, null, false, false, null); + this.location = location; + } + + @Explain(displayName = "location", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getLocation() { + return location; + } + + @Override + public boolean mayNeedWriteId() { + return true; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationOperation.java new file mode 100644 index 0000000000..12cfca07c3 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationOperation.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of setting the location of a table. 
+ */ +public class AlterTableSetLocationOperation extends AbstractAlterTableOperation { + private final AlterTableSetLocationDesc desc; + + public AlterTableSetLocationOperation(DDLOperationContext context, AlterTableSetLocationDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + String newLocation = desc.getLocation(); + try { + URI locUri = new URI(newLocation); + if (!new Path(locUri).isAbsolute()) { + throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation); + } + sd.setLocation(newLocation); + } catch (URISyntaxException e) { + throw new HiveException(e); + } + environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java new file mode 100644 index 0000000000..861139d41b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... SET SERDE ... commands. 
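+ * + * For example (table name and property are illustrative; the SerDe class is Hive's built-in LazySimpleSerDe): + * ALTER TABLE t1 SET SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ('field.delim'=',');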
+ */ +@Explain(displayName = "Set Serde", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableSetSerdeDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableSetSerdeDesc.class, AlterTableSetSerdeOperation.class); + } + + private final String serdeName; + + public AlterTableSetSerdeDesc(String tableName, Map partitionSpec, Map props, + String serdeName) throws SemanticException { + super(AlterTableTypes.SET_SERDE, tableName, partitionSpec, null, false, false, props); + this.serdeName = serdeName; + } + + @Explain(displayName = "serde", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getSerdeName() { + return serdeName; + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeOperation.java new file mode 100644 index 0000000000..9e173207df --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeOperation.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import org.apache.commons.collections.MapUtils; +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AlterTableUtils; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.io.orc.OrcSerde; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.serde2.Deserializer; + +/** + * Operation process of setting the serde. 
+ */ +public class AlterTableSetSerdeOperation extends AbstractAlterTableOperation { + private final AlterTableSetSerdeDesc desc; + + public AlterTableSetSerdeOperation(DDLOperationContext context, AlterTableSetSerdeDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + String serdeName = desc.getSerdeName(); + String oldSerdeName = sd.getSerdeInfo().getSerializationLib(); + + // if orc table, restrict changing the serde as it can break schema evolution + if (AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()) && + oldSerdeName.equalsIgnoreCase(OrcSerde.class.getName()) && + !serdeName.equalsIgnoreCase(OrcSerde.class.getName())) { + throw new HiveException(ErrorMsg.CANNOT_CHANGE_SERDE, OrcSerde.class.getSimpleName(), + desc.getTableName()); + } + + sd.getSerdeInfo().setSerializationLib(serdeName); + if (MapUtils.isNotEmpty(desc.getProps())) { + sd.getSerdeInfo().getParameters().putAll(desc.getProps()); + } + + if (partition != null) { + // TODO: this appears to be a no-op, as the columns are reset to their current values; revisit whether it is needed. + partition.getTPartition().getSd().setCols(partition.getTPartition().getSd().getCols()); + } else { + if (Table.shouldStoreFieldsInMetastore(context.getConf(), serdeName, table.getParameters()) + && !Table.hasMetastoreBasedSchema(context.getConf(), oldSerdeName)) { + // If the new SerDe needs to store fields in the metastore but the old SerDe doesn't, save + // the fields so that the new SerDe can operate. Note that this may fail if some fields + // from the old SerDe are too long to be stored in the metastore, but there's nothing we can do. + try { + Deserializer oldSerde = HiveMetaStoreUtils.getDeserializer(context.getConf(), table.getTTable(), false, + oldSerdeName); + table.setFields(Hive.getFieldsFromDeserializer(table.getTableName(), oldSerde)); + } catch (MetaException ex) { + throw new HiveException(ex); + } + } + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java new file mode 100644 index 0000000000..381b94f38a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.storage;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for ALTER TABLE ... SET SERDEPROPERTIES ... commands.
+ */
+@Explain(displayName = "Set Serde Props", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterTableSetSerdePropsDesc extends AbstractAlterTableDesc {
+  private static final long serialVersionUID = 1L;
+
+  static {
+    DDLTask2.registerOperation(AlterTableSetSerdePropsDesc.class, AlterTableSetSerdePropsOperation.class);
+  }
+
+  public AlterTableSetSerdePropsDesc(String tableName, Map<String, String> partitionSpec,
+      Map<String, String> props) throws SemanticException {
+    super(AlterTableTypes.SET_SERDE_PROPS, tableName, partitionSpec, null, false, false, props);
+  }
+
+  @Override
+  public boolean mayNeedWriteId() {
+    return false;
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsOperation.java
new file mode 100644
index 0000000000..e1294e95e2
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsOperation.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.storage;
+
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+/**
+ * Operation process of setting serde properties.
+ */ +public class AlterTableSetSerdePropsOperation extends AbstractAlterTableOperation { + private final AlterTableSetSerdePropsDesc desc; + + public AlterTableSetSerdePropsOperation(DDLOperationContext context, AlterTableSetSerdePropsDesc desc) { + super(context, desc); + this.desc = desc; + } + + @Override + protected void doAlteration(Table table, Partition partition) throws HiveException { + StorageDescriptor sd = getStorageDescriptor(table, partition); + sd.getSerdeInfo().getParameters().putAll(desc.getProps()); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java new file mode 100644 index 0000000000..afe2b0817b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for ALTER TABLE ... SET SKEWED LOCATION commands. 
+ */ +@Explain(displayName = "Set Skewed Location", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableSetSkewedLocationDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableSetSkewedLocationDesc.class, AlterTableSetSkewedLocationOperation.class); + } + + private final Map, String> skewedLocations; + + public AlterTableSetSkewedLocationDesc(String tableName, Map partitionSpec, + Map, String> skewedLocations) throws SemanticException { + super(AlterTableTypes.SET_SKEWED_LOCATION, tableName, partitionSpec, null, false, false, null); + this.skewedLocations = skewedLocations; + } + + public Map, String> getSkewedLocations() { + return skewedLocations; + } + + // for Explain only + @Explain(displayName = "skewed locations", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List getSkewedLocationsExplain() { + return skewedLocations.entrySet().stream() + .map(e -> "(" + e.getKey() + ": " + e.getValue() + ")").collect(Collectors.toList()); + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationOperation.java new file mode 100644 index 0000000000..5385ddcbbe --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationOperation.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of setting the skewed location. 
+ */
+public class AlterTableSetSkewedLocationOperation extends AbstractAlterTableOperation {
+  private final AlterTableSetSkewedLocationDesc desc;
+
+  public AlterTableSetSkewedLocationOperation(DDLOperationContext context, AlterTableSetSkewedLocationDesc desc) {
+    super(context, desc);
+    this.desc = desc;
+  }
+
+  @Override
+  protected void doAlteration(Table table, Partition partition) throws HiveException {
+    // process location one-by-one
+    for (Map.Entry<List<String>, String> entry : desc.getSkewedLocations().entrySet()) {
+      List<String> key = entry.getKey();
+      String newLocation = entry.getValue();
+      try {
+        URI locationUri = new URI(newLocation);
+        List<String> skewedLocation = new ArrayList<String>(key);
+        if (partition != null) {
+          partition.setSkewedValueLocationMap(skewedLocation, locationUri.toString());
+        } else {
+          table.setSkewedValueLocationMap(skewedLocation, locationUri.toString());
+        }
+      } catch (URISyntaxException e) {
+        throw new HiveException(e);
+      }
+    }
+
+    environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java
new file mode 100644
index 0000000000..6a6f397ef7
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.storage;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+/**
+ * DDL task description for ALTER TABLE ... [SKEWED BY ... ON ...] [[NOT] STORED AS DIRECTORIES] commands.
+ */ +@Explain(displayName = "Skewed By", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterTableSkewedByDesc extends AbstractAlterTableDesc { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(AlterTableSkewedByDesc.class, AlterTableSkewedByOperation.class); + } + + private final List skewedColumnNames; + private final List> skewedColumnValues; + private final boolean isStoredAsDirectories; + + public AlterTableSkewedByDesc(String tableName, List skewedColumnNames, List> skewedColumnValues, + boolean isStoredAsDirectories) throws SemanticException { + super(AlterTableTypes.SKEWED_BY, tableName, null, null, false, false, null); + this.skewedColumnNames = skewedColumnNames; + this.skewedColumnValues = skewedColumnValues; + this.isStoredAsDirectories = isStoredAsDirectories; + } + + @Explain(displayName = "skewedColumnNames", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List getSkewedColumnNames() { + return skewedColumnNames; + } + + public List> getSkewedColumnValues() { + return skewedColumnValues; + } + + // for Explain only + @Explain(displayName = "skewedColumnValues", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public List getSkewedColumnValuesForExplain() { + return skewedColumnValues.stream().map(l -> l.toString()).collect(Collectors.toList()); + } + + @Explain(displayName = "isStoredAsDirectories", displayOnlyOnTrue = true, + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isStoredAsDirectories() { + return isStoredAsDirectories; + } + + @Override + public boolean mayNeedWriteId() { + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByOperation.java new file mode 100644 index 0000000000..77ef217c6b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByOperation.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table.storage; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process of setting the location of a table. 
+ */
+public class AlterTableSkewedByOperation extends AbstractAlterTableOperation {
+  private final AlterTableSkewedByDesc desc;
+
+  public AlterTableSkewedByOperation(DDLOperationContext context, AlterTableSkewedByDesc desc) {
+    super(context, desc);
+    this.desc = desc;
+  }
+
+  @Override
+  protected void doAlteration(Table table, Partition partition) throws HiveException {
+    // Validation's been done at compile time. No validation is needed here.
+    List<String> skewedColNames = desc.getSkewedColumnNames();
+    List<List<String>> skewedValues = desc.getSkewedColumnValues();
+
+    if (table.getSkewedInfo() == null) {
+      table.setSkewedInfo(new SkewedInfo());
+    }
+    table.setSkewedColNames(skewedColNames);
+    table.setSkewedColValues(skewedValues);
+
+    table.setStoredAsSubDirectories(desc.isStoredAsDirectories());
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/package-info.java
new file mode 100644
index 0000000000..082b87d9dd
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Storage related DDL operation descriptions and operations.
*/ +package org.apache.hadoop.hive.ql.ddl.table.storage; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 2e955aef9d..88ea73f8d5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; import org.apache.hadoop.hive.metastore.HiveMetaHook; -import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.Msck; import org.apache.hadoop.hive.metastore.MsckInfo; import org.apache.hadoop.hive.metastore.TableType; @@ -50,10 +49,8 @@ import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.CompilationOpContext; @@ -69,8 +66,6 @@ import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.merge.MergeFileTask; import org.apache.hadoop.hive.ql.io.merge.MergeFileWork; -import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; -import org.apache.hadoop.hive.ql.io.orc.OrcSerde; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -1286,158 +1281,11 @@ private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition return alterTableAddProps(alterTbl, tbl, part, environmentContext); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) { return alterTableDropProps(alterTbl, tbl, part, environmentContext); - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps()); - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - String serdeName = alterTbl.getSerdeName(); - String oldSerdeName = sd.getSerdeInfo().getSerializationLib(); - // if orc table, restrict changing the serde as it can break schema evolution - if (isSchemaEvolutionEnabled(tbl) && - oldSerdeName.equalsIgnoreCase(OrcSerde.class.getName()) && - !serdeName.equalsIgnoreCase(OrcSerde.class.getName())) { - throw new HiveException(ErrorMsg.CANNOT_CHANGE_SERDE, OrcSerde.class.getSimpleName(), - alterTbl.getOldName()); - } - sd.getSerdeInfo().setSerializationLib(serdeName); - if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) { - sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps()); - } - if (part != null) { - // TODO: wtf? This doesn't do anything. 
- part.getTPartition().getSd().setCols(part.getTPartition().getSd().getCols()); - } else { - if (Table.shouldStoreFieldsInMetastore(conf, serdeName, tbl.getParameters()) - && !Table.hasMetastoreBasedSchema(conf, oldSerdeName)) { - // If new SerDe needs to store fields in metastore, but the old serde doesn't, save - // the fields so that new SerDe could operate. Note that this may fail if some fields - // from old SerDe are too long to be stored in metastore, but there's nothing we can do. - try { - Deserializer oldSerde = HiveMetaStoreUtils.getDeserializer( - conf, tbl.getTTable(), false, oldSerdeName); - tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), oldSerde)); - } catch (MetaException ex) { - throw new HiveException(ex); - } - } - } - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - // if orc table, restrict changing the file format as it can break schema evolution - if (isSchemaEvolutionEnabled(tbl) && - sd.getInputFormat().equals(OrcInputFormat.class.getName()) - && !alterTbl.getInputFormat().equals(OrcInputFormat.class.getName())) { - throw new HiveException(ErrorMsg.CANNOT_CHANGE_FILEFORMAT, "ORC", alterTbl.getOldName()); - } - sd.setInputFormat(alterTbl.getInputFormat()); - sd.setOutputFormat(alterTbl.getOutputFormat()); - if (alterTbl.getSerdeName() != null) { - sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName()); - } - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - // validate sort columns and bucket columns - List columns = Utilities.getColumnNamesFromFieldSchema(tbl - .getCols()); - if (!alterTbl.isTurnOffSorting()) { - Utilities.validateColumnNames(columns, alterTbl.getBucketColumns()); - } - if (alterTbl.getSortColumns() != null) { - Utilities.validateColumnNames(columns, Utilities - .getColumnNamesFromSortCols(alterTbl.getSortColumns())); - } - - if (alterTbl.isTurnOffSorting()) { - sd.setSortCols(new ArrayList()); - } else if (alterTbl.getNumberBuckets() == -1) { - // -1 buckets means to turn off bucketing - sd.setBucketCols(new ArrayList()); - sd.setNumBuckets(-1); - sd.setSortCols(new ArrayList()); - } else { - sd.setBucketCols(alterTbl.getBucketColumns()); - sd.setNumBuckets(alterTbl.getNumberBuckets()); - sd.setSortCols(alterTbl.getSortColumns()); - } - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) { - StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); - String newLocation = alterTbl.getNewLocation(); - try { - URI locUri = new URI(newLocation); - if (!new Path(locUri).isAbsolute()) { - throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation); - } - sd.setLocation(newLocation); - } catch (URISyntaxException e) { - throw new HiveException(e); - } - environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS); - - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSKEWEDBY) { - // Validation's been done at compile time. no validation is needed here. - List skewedColNames = null; - List> skewedValues = null; - - if (alterTbl.isTurnOffSkewed()) { - // Convert skewed table to non-skewed table. - skewedColNames = new ArrayList(); - skewedValues = new ArrayList>(); - } else { - skewedColNames = alterTbl.getSkewedColNames(); - skewedValues = alterTbl.getSkewedColValues(); - } - - if ( null == tbl.getSkewedInfo()) { - // Convert non-skewed table to skewed table. 
- SkewedInfo skewedInfo = new SkewedInfo(); - skewedInfo.setSkewedColNames(skewedColNames); - skewedInfo.setSkewedColValues(skewedValues); - tbl.setSkewedInfo(skewedInfo); - } else { - tbl.setSkewedColNames(skewedColNames); - tbl.setSkewedColValues(skewedValues); - } - - tbl.setStoredAsSubDirectories(alterTbl.isStoredAsSubDirectories()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.OWNER) { if (alterTbl.getOwnerPrincipal() != null) { tbl.setOwner(alterTbl.getOwnerPrincipal().getName()); tbl.setOwnerType(alterTbl.getOwnerPrincipal().getType()); } - } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERSKEWEDLOCATION) { - // process location one-by-one - Map,String> locMaps = alterTbl.getSkewedLocations(); - Set> keys = locMaps.keySet(); - for(List key:keys){ - String newLocation = locMaps.get(key); - try { - URI locUri = new URI(newLocation); - if (part != null) { - List slk = new ArrayList(key); - part.setSkewedValueLocationMap(slk, locUri.toString()); - } else { - List slk = new ArrayList(key); - tbl.setSkewedValueLocationMap(slk, locUri.toString()); - } - } catch (URISyntaxException e) { - throw new HiveException(e); - } - } - - environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS); - } else if (alterTbl.getOp() == AlterTableTypes.ALTERBUCKETNUM) { - if (part != null) { - if (part.getBucketCount() == alterTbl.getNumberBuckets()) { - return null; - } - part.setBucketCount(alterTbl.getNumberBuckets()); - } else { - if (tbl.getNumBuckets() == alterTbl.getNumberBuckets()) { - return null; - } - tbl.setNumBuckets(alterTbl.getNumberBuckets()); - } } else { throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 15a266d268..04d12075a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol; import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.LoadFileDesc; @@ -63,6 +62,7 @@ import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.util.DirectionUtils; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -818,9 +818,8 @@ private void updatePartitionBucketSortColumns(Hive db, Table table, Partition pa for (SortCol sortCol : sortCols) { if (sortCol.getIndexes().get(0) < partn.getCols().size()) { newSortCols.add(new Order( - partn.getCols().get(sortCol.getIndexes().get(0)).getName(), - sortCol.getSortOrder() == '+' ? 
BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC : - BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC)); + partn.getCols().get(sortCol.getIndexes().get(0)).getName(), + DirectionUtils.signToCode(sortCol.getSortOrder()))); } else { // If the table is sorted on a partition column, not valid for sorting updateSortCols = false; diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java index 86dfef091c..7f8f9a7631 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.hooks; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.metadata.DummyPartition; @@ -34,8 +32,7 @@ * object may be a table, partition, dfs directory or a local directory. */ public class WriteEntity extends Entity implements Serializable { - - private static final Logger LOG = LoggerFactory.getLogger(WriteEntity.class); + private static final long serialVersionUID = 1L; private boolean isTempURI = false; private transient boolean isDynamicPartitionWrite = false; @@ -200,38 +197,42 @@ public boolean isTempURI() { */ public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTypes op) { switch (op) { - case RENAME_COLUMN: - case ADDCLUSTERSORTCOLUMN: - case ADDFILEFORMAT: - case ADDSERDE: - case DROPPROPS: - case REPLACE_COLUMNS: - case ARCHIVE: - case UNARCHIVE: - case ALTERLOCATION: - case DROPPARTITION: - case RENAMEPARTITION: - case ADDSKEWEDBY: - case ALTERSKEWEDLOCATION: - case ALTERBUCKETNUM: - case ALTERPARTITION: - case ADD_COLUMNS: - case RENAME: - case TRUNCATE: - case MERGEFILES: - case DROP_CONSTRAINT: return WriteType.DDL_EXCLUSIVE; - - case ADDPARTITION: - case ADDSERDEPROPS: - case ADDPROPS: - case UPDATESTATS: - return WriteType.DDL_SHARED; - - case COMPACT: - case TOUCH: return WriteType.DDL_NO_LOCK; - - default: - throw new RuntimeException("Unknown operation " + op.toString()); + case RENAME_COLUMN: + case CLUSTERED_BY: + case NOT_SORTED: + case NOT_CLUSTERED: + case SET_FILE_FORMAT: + case SET_SERDE: + case DROPPROPS: + case REPLACE_COLUMNS: + case ARCHIVE: + case UNARCHIVE: + case SET_LOCATION: + case DROPPARTITION: + case RENAMEPARTITION: + case SKEWED_BY: + case SET_SKEWED_LOCATION: + case INTO_BUCKETS: + case ALTERPARTITION: + case ADD_COLUMNS: + case RENAME: + case TRUNCATE: + case MERGEFILES: + case DROP_CONSTRAINT: + return WriteType.DDL_EXCLUSIVE; + + case ADDPARTITION: + case SET_SERDE_PROPS: + case ADDPROPS: + case UPDATESTATS: + return WriteType.DDL_SHARED; + + case COMPACT: + case TOUCH: + return WriteType.DDL_NO_LOCK; + + default: + throw new RuntimeException("Unknown operation " + op.toString()); } } public boolean isDynamicPartitionWrite() { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java index 995ff9b68c..f65cc25642 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter; import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; -import 
org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ColumnStatsList; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; import org.apache.hadoop.hive.ql.plan.ColStatistics; @@ -75,6 +74,7 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.apache.hadoop.hive.ql.util.DirectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -389,15 +389,9 @@ public RelNode toRel(ToRelContext context) { for (int i=0; i partSpec) + private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName, Map partSpec) throws SemanticException { - HashMap mapProp = getProps((ASTNode) (ast.getChild(0)) - .getChild(0)); - AlterTableDesc alterTblDesc = new AlterTableDesc( - AlterTableTypes.ADDSERDEPROPS); - alterTblDesc.setProps(mapProp); - alterTblDesc.setOldName(tableName); - alterTblDesc.setPartSpec(partSpec); + Map mapProp = getProps((ASTNode) (ast.getChild(0)).getChild(0)); + AlterTableSetSerdePropsDesc alterTblDesc = new AlterTableSetSerdePropsDesc(tableName, partSpec, mapProp); - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, AlterTableTypes.SET_SERDE_PROPS, false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableSerde(ASTNode ast, String tableName, - HashMap partSpec) + private void analyzeAlterTableSerde(ASTNode ast, String tableName, Map partSpec) throws SemanticException { - String serdeName = unescapeSQLString(ast.getChild(0).getText()); - AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDSERDE); - if (ast.getChildCount() > 1) { - HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) - .getChild(0)); - alterTblDesc.setProps(mapProp); - } - alterTblDesc.setOldName(tableName); - alterTblDesc.setSerdeName(serdeName); - alterTblDesc.setPartSpec(partSpec); + Map props = (ast.getChildCount() > 1) ? 
getProps((ASTNode) (ast.getChild(1)).getChild(0)) : null; + AlterTableSetSerdeDesc alterTblDesc = new AlterTableSetSerdeDesc(tableName, partSpec, props, serdeName); - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, AlterTableTypes.SET_SERDE, false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, - HashMap partSpec) + private void analyzeAlterTableFileFormat(ASTNode ast, String tableName, Map partSpec) throws SemanticException { - StorageFormat format = new StorageFormat(conf); ASTNode child = (ASTNode) ast.getChild(0); - if (!format.fillStorageFormat(child)) { throw new AssertionError("Unknown token " + child.getText()); } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, format.getInputFormat(), - format.getOutputFormat(), format.getSerde(), format.getStorageHandler(), partSpec); + AlterTableSetFileFormatDesc alterTblDesc = new AlterTableSetFileFormatDesc(tableName, partSpec, + format.getInputFormat(), format.getOutputFormat(), format.getSerde()); - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, AlterTableTypes.SET_FILE_FORMAT, false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private WriteType determineAlterTableWriteType(Table tab, AlterTableDesc desc, AlterTableTypes op) { @@ -2084,29 +2076,28 @@ private void analyzeAlterTableOwner(ASTNode ast, String tableName) throws Semant rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); } - private void analyzeAlterTableLocation(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { + private void analyzeAlterTableLocation(ASTNode ast, String tableName, Map partSpec) + throws SemanticException { String newLocation = unescapeSQLString(ast.getChild(0).getText()); try { - // To make sure host/port pair is valid, the status of the location - // does not matter + // To make sure host/port pair is valid, the status of the location does not matter FileSystem.get(new URI(newLocation), conf).getFileStatus(new Path(newLocation)); } catch (FileNotFoundException e) { - // Only check host/port pair is valid, wheter the file exist or not does not matter + // Only check host/port pair is valid, whether the file exist or not does not matter } catch (Exception e) { throw new SemanticException("Cannot connect to namenode, please check if host/port pair for " + newLocation + " is valid", e); } + addLocationToOutputs(newLocation); - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, newLocation, partSpec); + AlterTableSetLocationDesc alterTblDesc = new AlterTableSetLocationDesc(tableName, partSpec, newLocation); Table tbl = getTable(tableName); if (AcidUtils.isTransactionalTable(tbl)) { setAcidDdlDesc(alterTblDesc); } - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, AlterTableTypes.SET_LOCATION, false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private void 
analyzeAlterTablePartMergeFiles(ASTNode ast, @@ -2253,17 +2244,16 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, } } - private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, - HashMap partSpec) throws SemanticException { + private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, Map partSpec) + throws SemanticException { - AlterTableDesc alterTblDesc; + AbstractAlterTableDesc alterTblDesc; switch (ast.getChild(0).getType()) { case HiveParser.TOK_NOT_CLUSTERED: - alterTblDesc = new AlterTableDesc(tableName, -1, new ArrayList(), - new ArrayList(), partSpec); + alterTblDesc = new AlterTableNotClusteredDesc(tableName, partSpec); break; case HiveParser.TOK_NOT_SORTED: - alterTblDesc = new AlterTableDesc(tableName, true, partSpec); + alterTblDesc = new AlterTableNotSortedDesc(tableName, partSpec); break; case HiveParser.TOK_ALTERTABLE_BUCKETS: ASTNode buckets = (ASTNode) ast.getChild(0); @@ -2280,14 +2270,13 @@ private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, throw new SemanticException(ErrorMsg.INVALID_BUCKET_NUMBER.getMsg()); } - alterTblDesc = new AlterTableDesc(tableName, numBuckets, - bucketCols, sortCols, partSpec); + alterTblDesc = new AlterTableClusteredByDesc(tableName, partSpec, numBuckets, bucketCols, sortCols); break; default: throw new SemanticException("Invalid operation " + ast.getChild(0).getType()); } - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, alterTblDesc.getType(), false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private void analyzeAlterTableCompact(ASTNode ast, String tableName, @@ -3414,20 +3403,19 @@ private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), renamePartitionDesc))); } - private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, - HashMap partSpec) throws SemanticException { + private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, Map partSpec) + throws SemanticException { Table tab = getTable(tblName, true); - if (tab.getBucketCols() == null || tab.getBucketCols().isEmpty()) { + if (CollectionUtils.isEmpty(tab.getBucketCols())) { throw new SemanticException(ErrorMsg.ALTER_BUCKETNUM_NONBUCKETIZED_TBL.getMsg()); } - validateAlterTableType(tab, AlterTableTypes.ALTERBUCKETNUM); + validateAlterTableType(tab, AlterTableTypes.INTO_BUCKETS); inputs.add(new ReadEntity(tab)); - int bucketNum = Integer.parseInt(ast.getChild(0).getText()); - AlterTableDesc alterBucketNum = new AlterTableDesc(tblName, partSpec, bucketNum); + int numberOfBuckets = Integer.parseInt(ast.getChild(0).getText()); + AlterTableIntoBucketsDesc alterBucketNum = new AlterTableIntoBucketsDesc(tblName, partSpec, numberOfBuckets); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterBucketNum))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterBucketNum))); } private void analyzeAlterTableAddCols(String[] qualified, ASTNode ast, Map partSpec) @@ -4209,7 +4197,7 @@ private void addTableDropPartsOutputs(Table tab, * node * @throws SemanticException */ - private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws SemanticException { + private void analyzeAlterTableSkewedby(String[] qualified, ASTNode ast) throws SemanticException { /** * 
Throw an error if the user tries to use the DDL with * hive.internal.ddl.list.bucketing.enable set to false. @@ -4221,16 +4209,13 @@ private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws S inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); - validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY); + validateAlterTableType(tab, AlterTableTypes.SKEWED_BY); String tableName = getDotName(qualified); if (ast.getChildCount() == 0) { /* Convert a skewed table to non-skewed table. */ - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, - new ArrayList(), new ArrayList>()); - alterTblDesc.setStoredAsSubDirectories(false); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + AlterTableNotSkewedDesc alterTblDesc = new AlterTableNotSkewedDesc(tableName); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } else { switch (((ASTNode) ast.getChild(0)).getToken().getType()) { case HiveParser.TOK_TABLESKEWED: @@ -4254,17 +4239,14 @@ private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws S */ private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) throws SemanticException { - List skewedColNames = tab.getSkewedColNames(); + List skewedColNames = tab.getSkewedColNames(); List> skewedColValues = tab.getSkewedColValues(); - if ((skewedColNames == null) || (skewedColNames.size() == 0) || (skewedColValues == null) - || (skewedColValues.size() == 0)) { + if (CollectionUtils.isEmpty(skewedColNames) || CollectionUtils.isEmpty(skewedColValues)) { throw new SemanticException(ErrorMsg.ALTER_TBL_STOREDASDIR_NOT_SKEWED.getMsg(tableName)); } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, - skewedColNames, skewedColValues); - alterTblDesc.setStoredAsSubDirectories(false); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + + AlterTableSkewedByDesc alterTblDesc = new AlterTableSkewedByDesc(tableName, skewedColNames, skewedColValues, false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } /** @@ -4274,8 +4256,7 @@ private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) * @param tab * @throws SemanticException */ - private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) - throws SemanticException { + private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) throws SemanticException { List skewedColNames = new ArrayList(); List> skewedValues = new ArrayList>(); /* skewed column names. */ @@ -4286,17 +4267,15 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) // stored as directories boolean storedAsDirs = analyzeStoredAdDirs(skewedNode); + if (tab != null) { + /* Validate skewed information. 
*/ + ValidationUtility.validateSkewedInformation( + ParseUtils.validateColumnNameUniqueness(tab.getCols()), skewedColNames, skewedValues); + } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, - skewedColNames, skewedValues); - alterTblDesc.setStoredAsSubDirectories(storedAsDirs); - /** - * Validate information about skewed table - */ - alterTblDesc.setTable(tab); - alterTblDesc.validate(); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + AlterTableSkewedByDesc alterTblDesc = new AlterTableSkewedByDesc(tableName, skewedColNames, skewedValues, + storedAsDirs); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } /** @@ -4366,10 +4345,9 @@ private void analyzeAlterTableSkewedLocation(ASTNode ast, String tableName, } } } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, locations, partSpec); - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + AlterTableSetSkewedLocationDesc alterTblDesc = new AlterTableSetSkewedLocationDesc(tableName, partSpec, locations); + addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, AlterTableTypes.SET_SKEWED_LOCATION, false); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), alterTblDesc))); } private void addLocationToOutputs(String newLocation) throws SemanticException { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index edb20ae7e6..027ef93fc9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -250,6 +250,7 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFSurrogateKey; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; import org.apache.hadoop.hive.ql.udf.generic.GenericUDTFInline; +import org.apache.hadoop.hive.ql.util.DirectionUtils; import org.apache.hadoop.hive.ql.util.ResourceDownloader; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.Deserializer; @@ -6896,8 +6897,8 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb, StringBuilder order = new StringBuilder(); StringBuilder nullOrder = new StringBuilder(); for (int sortOrder : sortOrders) { - order.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? '+' : '-'); - nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z'); + order.append(DirectionUtils.codeToSign(sortOrder)); + nullOrder.append(sortOrder == DirectionUtils.ASCENDING_CODE ? 'a' : 'z'); } input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(), maxReducers, (AcidUtils.isFullAcidTable(dest_tab) ? 
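A note on the pattern at work in the new storage package classes above: each desc class registers itself with DDLTask2 in a static initializer (DDLTask2.registerOperation(descClass, operationClass)), and DDLTask2 then instantiates the registered operation for whatever desc the analyzer hands it. The dispatch side of DDLTask2 is not part of this diff; the sketch below shows one plausible shape of such a registry, under the assumption that every operation exposes a (context, desc) constructor, which all the operations in this patch do. The class and method names (OperationRegistry, createOperation) are hypothetical, not the actual DDLTask2 internals.

import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch of a desc-to-operation registry; not the real DDLTask2 code.
final class OperationRegistry {
  // Desc class -> operation class, populated by the desc classes' static initializers.
  private static final Map<Class<?>, Class<?>> OPERATIONS = new ConcurrentHashMap<>();

  static void registerOperation(Class<?> descClass, Class<?> operationClass) {
    OPERATIONS.put(descClass, operationClass);
  }

  // Instantiates the operation registered for the given desc. Assumes the operation's
  // constructor declares exactly these parameter classes, as e.g.
  // AlterTableSetSerdeOperation(DDLOperationContext, AlterTableSetSerdeDesc) does.
  static Object createOperation(Object context, Object desc) throws ReflectiveOperationException {
    Class<?> operationClass = OPERATIONS.get(desc.getClass());
    if (operationClass == null) {
      throw new IllegalStateException("No operation registered for " + desc.getClass());
    }
    Constructor<?> constructor = operationClass.getConstructor(context.getClass(), desc.getClass());
    return constructor.newInstance(context, desc);
  }

  private OperationRegistry() {
  }
}

The benefit shows in the next hunks: adding an ALTER TABLE variant now means adding one desc/operation pair, instead of another branch in the central switch being carved out of DDLTask.java and AlterTableDesc.java below.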
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index cb6958a327..9aa7e73fa7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -21,17 +21,13 @@ import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.ParseUtils; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.Explain.Level; import com.google.common.collect.ImmutableList; import java.io.Serializable; -import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -52,14 +48,15 @@ */ public static enum AlterTableTypes { RENAME("rename"), ADD_COLUMNS("add columns"), REPLACE_COLUMNS("replace columns"), - ADDPROPS("add props"), DROPPROPS("drop props"), ADDSERDE("add serde"), ADDSERDEPROPS("add serde props"), - ADDFILEFORMAT("add fileformat"), ADDCLUSTERSORTCOLUMN("add cluster sort column"), + ADDPROPS("add props"), DROPPROPS("drop props"), SET_SERDE("set serde"), SET_SERDE_PROPS("set serde props"), + SET_FILE_FORMAT("add fileformat"), CLUSTERED_BY("clustered by"), NOT_SORTED("not sorted"), + NOT_CLUSTERED("not clustered"), RENAME_COLUMN("rename column"), ADDPARTITION("add partition"), TOUCH("touch"), ARCHIVE("archieve"), - UNARCHIVE("unarchieve"), ALTERLOCATION("alter location"), + UNARCHIVE("unarchieve"), SET_LOCATION("set location"), DROPPARTITION("drop partition"), RENAMEPARTITION("rename partition"), // Note: used in RenamePartitionDesc, not here. - ADDSKEWEDBY("add skew column"), - ALTERSKEWEDLOCATION("alter skew location"), ALTERBUCKETNUM("alter bucket number"), + SKEWED_BY("skewed by"), NOT_SKEWED("not skewed"), + SET_SKEWED_LOCATION("alter skew location"), INTO_BUCKETS("alter bucket number"), ALTERPARTITION("alter partition"), // Note: this is never used in AlterTableDesc. 
COMPACT("compact"), TRUNCATE("truncate"), MERGEFILES("merge files"), DROP_CONSTRAINT("drop constraint"), @@ -89,40 +86,22 @@ alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.RENAME_COLUMN); alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDPROPS); alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.DROPPROPS); - alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDSERDE); - alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDSERDEPROPS); - alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.ADDFILEFORMAT); + alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.SET_SERDE); + alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.SET_SERDE_PROPS); + alterTableTypesWithPartialSpec.add(AlterTableDesc.AlterTableTypes.SET_FILE_FORMAT); } AlterTableTypes op; String oldName; String newName; - String serdeName; Map props; - String inputFormat; - String outputFormat; - String storageHandler; - int numberBuckets; - ArrayList bucketColumns; - ArrayList sortColumns; - - String oldColName; - String newColName; - String newColType; - String newColComment; + boolean expectView; HashMap partSpec; - private String newLocation; boolean protectModeEnable; ProtectModeType protectModeType; - Map, String> skewedLocations; boolean isTurnOffSkewed = false; - boolean isStoredAsSubDirectories = false; - List skewedColNames; - List> skewedColValues; - Table tableForSkewedColValidation; boolean isDropIfExists = false; - boolean isTurnOffSorting = false; boolean isCascade = false; EnvironmentContext environmentContext; ReplicationSpec replicationSpec; @@ -185,78 +164,6 @@ public AlterTableDesc(AlterTableTypes alterType, HashMap partSpe this.expectView = expectView; } - /** - * - * @param name - * name of the table - * @param inputFormat - * new table input format - * @param outputFormat - * new table output format - * @param partSpec - * @throws SemanticException - */ - public AlterTableDesc(String name, String inputFormat, String outputFormat, - String serdeName, String storageHandler, HashMap partSpec) throws SemanticException { - super(); - op = AlterTableTypes.ADDFILEFORMAT; - setOldName(name); - this.inputFormat = inputFormat; - this.outputFormat = outputFormat; - this.serdeName = serdeName; - this.storageHandler = storageHandler; - this.partSpec = partSpec; - } - - public AlterTableDesc(String tableName, int numBuckets, - List bucketCols, List sortCols, HashMap partSpec) throws SemanticException { - setOldName(tableName); - op = AlterTableTypes.ADDCLUSTERSORTCOLUMN; - numberBuckets = numBuckets; - bucketColumns = new ArrayList(bucketCols); - sortColumns = new ArrayList(sortCols); - this.partSpec = partSpec; - } - - public AlterTableDesc(String tableName, boolean sortingOff, HashMap partSpec) throws SemanticException { - setOldName(tableName); - op = AlterTableTypes.ADDCLUSTERSORTCOLUMN; - isTurnOffSorting = sortingOff; - this.partSpec = partSpec; - } - - public AlterTableDesc(String tableName, String newLocation, - HashMap partSpec) throws SemanticException { - op = AlterTableTypes.ALTERLOCATION; - setOldName(tableName); - this.newLocation = newLocation; - this.partSpec = partSpec; - } - - public AlterTableDesc(String tableName, Map, String> locations, - HashMap partSpec) throws SemanticException { - op = AlterTableTypes.ALTERSKEWEDLOCATION; - setOldName(tableName); - this.skewedLocations = locations; - this.partSpec = partSpec; - } - - public AlterTableDesc(String tableName, boolean 
turnOffSkewed, - List skewedColNames, List> skewedColValues) throws SemanticException { - setOldName(tableName); - op = AlterTableTypes.ADDSKEWEDBY; - this.isTurnOffSkewed = turnOffSkewed; - this.skewedColNames = new ArrayList(skewedColNames); - this.skewedColValues = new ArrayList>(skewedColValues); - } - - public AlterTableDesc(String tableName, HashMap partSpec, int numBuckets) throws SemanticException { - op = AlterTableTypes.ALTERBUCKETNUM; - setOldName(tableName); - this.partSpec = partSpec; - this.numberBuckets = numBuckets; - } - public AlterTableDesc(String tableName, PrincipalDesc ownerPrincipal) { op = AlterTableTypes.OWNER; this.oldName = tableName; @@ -328,22 +235,6 @@ public void setOp(AlterTableTypes op) { this.op = op; } - /** - * @return the serdeName - */ - @Explain(displayName = "deserializer library") - public String getSerdeName() { - return serdeName; - } - - /** - * @param serdeName - * the serdeName to set - */ - public void setSerdeName(String serdeName) { - this.serdeName = serdeName; - } - /** * @return the props */ @@ -360,159 +251,6 @@ public void setProps(Map props) { this.props = props; } - /** - * @return the input format - */ - @Explain(displayName = "input format", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getInputFormat() { - return inputFormat; - } - - /** - * @param inputFormat - * the input format to set - */ - public void setInputFormat(String inputFormat) { - this.inputFormat = inputFormat; - } - - /** - * @return the output format - */ - @Explain(displayName = "output format", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getOutputFormat() { - return outputFormat; - } - - /** - * @param outputFormat - * the output format to set - */ - public void setOutputFormat(String outputFormat) { - this.outputFormat = outputFormat; - } - - /** - * @return the storage handler - */ - @Explain(displayName = "storage handler", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getStorageHandler() { - return storageHandler; - } - - /** - * @param storageHandler - * the storage handler to set - */ - public void setStorageHandler(String storageHandler) { - this.storageHandler = storageHandler; - } - - /** - * @return the number of buckets - */ - public int getNumberBuckets() { - return numberBuckets; - } - - /** - * @param numberBuckets - * the number of buckets to set - */ - public void setNumberBuckets(int numberBuckets) { - this.numberBuckets = numberBuckets; - } - - /** - * @return the bucket columns - */ - public ArrayList getBucketColumns() { - return bucketColumns; - } - - /** - * @param bucketColumns - * the bucket columns to set - */ - public void setBucketColumns(ArrayList bucketColumns) { - this.bucketColumns = bucketColumns; - } - - /** - * @return the sort columns - */ - public ArrayList getSortColumns() { - return sortColumns; - } - - /** - * @param sortColumns - * the sort columns to set - */ - public void setSortColumns(ArrayList sortColumns) { - this.sortColumns = sortColumns; - } - - /** - * @return old column name - */ - public String getOldColName() { - return oldColName; - } - - /** - * @param oldColName - * the old column name - */ - public void setOldColName(String oldColName) { - this.oldColName = oldColName; - } - - /** - * @return new column name - */ - public String getNewColName() { - return newColName; - } - - /** - * @param newColName - * the new column name - */ - public void setNewColName(String newColName) { - this.newColName = newColName; 
- } - - /** - * @return new column type - */ - public String getNewColType() { - return newColType; - } - - /** - * @param newType - * new column's type - */ - public void setNewColType(String newType) { - newColType = newType; - } - - /** - * @return new column's comment - */ - public String getNewColComment() { - return newColComment; - } - - /** - * @param newComment - * new column's comment - */ - public void setNewColComment(String newComment) { - newColComment = newComment; - } - /** * @return whether to expect a view being altered */ @@ -542,20 +280,6 @@ public void setPartSpec(HashMap partSpec) { this.partSpec = partSpec; } - /** - * @return new location - */ - public String getNewLocation() { - return newLocation; - } - - /** - * @param newLocation new location - */ - public void setNewLocation(String newLocation) { - this.newLocation = newLocation; - } - public boolean isProtectModeEnable() { return protectModeEnable; } @@ -571,26 +295,6 @@ public ProtectModeType getProtectModeType() { public void setProtectModeType(ProtectModeType protectModeType) { this.protectModeType = protectModeType; } - /** - * @return the skewedLocations - */ - public Map, String> getSkewedLocations() { - return skewedLocations; - } - - /** - * @param skewedLocations the skewedLocations to set - */ - public void setSkewedLocations(Map, String> skewedLocations) { - this.skewedLocations = skewedLocations; - } - - /** - * @return isTurnOffSorting - */ - public boolean isTurnOffSorting() { - return isTurnOffSorting; - } /** * @return the turnOffSkewed @@ -606,69 +310,6 @@ public void setTurnOffSkewed(boolean turnOffSkewed) { this.isTurnOffSkewed = turnOffSkewed; } - /** - * @return the skewedColNames - */ - public List getSkewedColNames() { - return skewedColNames; - } - - /** - * @param skewedColNames the skewedColNames to set - */ - public void setSkewedColNames(List skewedColNames) { - this.skewedColNames = skewedColNames; - } - - /** - * @return the skewedColValues - */ - public List> getSkewedColValues() { - return skewedColValues; - } - - /** - * @param skewedColValues the skewedColValues to set - */ - public void setSkewedColValues(List> skewedColValues) { - this.skewedColValues = skewedColValues; - } - - /** - * Validate alter table description. - * - * @throws SemanticException - */ - public void validate() throws SemanticException { - if (null != tableForSkewedColValidation) { - /* Validate skewed information. 
*/ - ValidationUtility.validateSkewedInformation( - ParseUtils.validateColumnNameUniqueness(tableForSkewedColValidation.getCols()), - this.getSkewedColNames(), this.getSkewedColValues()); - } - } - - /** - * @param table the table to set - */ - public void setTable(Table table) { - this.tableForSkewedColValidation = table; - } - - /** - * @return the isStoredAsSubDirectories - */ - public boolean isStoredAsSubDirectories() { - return isStoredAsSubDirectories; - } - - /** - * @param isStoredAsSubDirectories the isStoredAsSubDirectories to set - */ - public void setStoredAsSubDirectories(boolean isStoredAsSubDirectories) { - this.isStoredAsSubDirectories = isStoredAsSubDirectories; - } - /** * @param isDropIfExists the isDropIfExists to set */ @@ -739,7 +380,7 @@ public boolean mayNeedWriteId() { case RENAME: case REPLACE_COLUMNS: case ADD_COLUMNS: - case ALTERLOCATION: + case SET_LOCATION: case UPDATE_COLUMNS: return true; // RENAMEPARTITION is handled in RenamePartitionDesc default: return false; diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index 97ef823bfd..67a5e6de46 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -73,8 +73,8 @@ import org.apache.hadoop.hive.ql.io.RecordIdentifier; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.util.DirectionUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; import org.apache.hadoop.hive.shims.ShimLoader; @@ -634,19 +634,13 @@ private String buildMmCompactionCtQuery( List sortCols = t.getSd().getSortCols(); if (sortCols.size() > 0) { query.append("SORTED BY ("); - List sortKeys = new ArrayList(); isFirst = true; for (Order sortCol : sortCols) { if (!isFirst) { query.append(", "); } isFirst = false; - query.append(sortCol.getCol()).append(" "); - if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) { - query.append("ASC"); - } else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { - query.append("DESC"); - } + query.append(sortCol.getCol()).append(" ").append(DirectionUtils.codeToText(sortCol.getOrder())); } query.append(") "); } diff --git ql/src/java/org/apache/hadoop/hive/ql/util/DirectionUtils.java ql/src/java/org/apache/hadoop/hive/ql/util/DirectionUtils.java new file mode 100644 index 0000000000..c1ba2fcd9a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/util/DirectionUtils.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git ql/src/java/org/apache/hadoop/hive/ql/util/DirectionUtils.java ql/src/java/org/apache/hadoop/hive/ql/util/DirectionUtils.java
new file mode 100644
index 0000000000..c1ba2fcd9a
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/util/DirectionUtils.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.util;
+
+import org.apache.calcite.rel.RelFieldCollation.Direction;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * Utility class for converting different direction description types.
+ */
+public final class DirectionUtils {
+  private DirectionUtils() {
+    throw new UnsupportedOperationException("DirectionUtils should not be instantiated");
+  }
+
+  public static final int ASCENDING_CODE = 1;
+  public static final int DESCENDING_CODE = 0;
+
+  private static final ImmutableMap<String, Object> ASCENDING_DATA = ImmutableMap.of(
+      "code", ASCENDING_CODE,
+      "sign", '+',
+      "text", "ASC",
+      "direction", Direction.ASCENDING,
+      "token", HiveParser.TOK_TABSORTCOLNAMEASC
+  );
+
+  private static final ImmutableMap<String, Object> DESCENDING_DATA = ImmutableMap.of(
+      "code", DESCENDING_CODE,
+      "sign", '-',
+      "text", "DESC",
+      "direction", Direction.DESCENDING,
+      "token", HiveParser.TOK_TABSORTCOLNAMEDESC
+  );
+
+  public static String codeToText(int code) {
+    return (String)convert("code", code, "text");
+  }
+
+  public static char codeToSign(int code) {
+    return (char)convert("code", code, "sign");
+  }
+
+  public static int tokenToCode(int token) {
+    return (int)convert("token", token, "code");
+  }
+
+  public static int signToCode(char sign) {
+    return (int)convert("sign", sign, "code");
+  }
+
+  public static Direction codeToDirection(int code) {
+    return (Direction)convert("code", code, "direction");
+  }
+
+  private static Object convert(String typeFrom, Object value, String typeTo) {
+    Object ascObject = ASCENDING_DATA.get(typeFrom);
+    Object descObject = DESCENDING_DATA.get(typeFrom);
+    if (ascObject.equals(value)) {
+      return ASCENDING_DATA.get(typeTo);
+    } else if (descObject.equals(value)) {
+      return DESCENDING_DATA.get(typeTo);
+    }
+
+    throw new IllegalArgumentException("The value " + value + " is not a valid value for " + typeFrom);
+  }
+}
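For reference, a minimal usage sketch of the conversion surface DirectionUtils exposes (illustrative only, not part of the patch):

    // All conversions go through the same two ImmutableMap rows above, so the
    // results below are consistent with each other by construction.
    String text = DirectionUtils.codeToText(DirectionUtils.ASCENDING_CODE);  // "ASC"
    char sign = DirectionUtils.codeToSign(DirectionUtils.DESCENDING_CODE);   // '-'
    int code = DirectionUtils.signToCode('+');                               // ASCENDING_CODE
    // An unknown value fails fast:
    // DirectionUtils.codeToText(42) -> IllegalArgumentException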
"file:///tmp/location"; +ALTER TABLE t SET LOCATION "file:///tmp/location"; +SHOW CREATE TABLE t; + +EXPLAIN ALTER TABLE t SET SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"; +ALTER TABLE t SET SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"; +SHOW CREATE TABLE t; + +EXPLAIN ALTER TABLE t SET SERDEPROPERTIES('property1'='value1', 'property2'='value2'); +ALTER TABLE t SET SERDEPROPERTIES('property1'='value1', 'property2'='value2'); +SHOW CREATE TABLE t; + + diff --git ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out index b50786a64c..8c4af26d34 100644 --- ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out +++ ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out @@ -11,4 +11,4 @@ PREHOOK: type: ALTERTABLE_LOCATION PREHOOK: Input: default@testwrongloc PREHOOK: Output: default@testwrongloc #### A masked pattern was here #### -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. {0} is not absolute. Please specify a complete absolute uri. relative/testwrongloc +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. {0} is not absolute. Please specify a complete absolute uri. relative/testwrongloc diff --git ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out index 667c7d5d38..1c02789139 100644 --- ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out +++ ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out @@ -18,4 +18,4 @@ PREHOOK: query: alter table aa set serdeproperties ("input.regex" = "[^\\](.*)", PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@aa PREHOOK: Output: default@aa -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. at least one column must be specified for the table +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. at least one column must be specified for the table diff --git ql/src/test/results/clientnegative/orc_change_fileformat.q.out ql/src/test/results/clientnegative/orc_change_fileformat.q.out index db454fe220..f7c5667430 100644 --- ql/src/test/results/clientnegative/orc_change_fileformat.q.out +++ ql/src/test/results/clientnegative/orc_change_fileformat.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile PREHOOK: type: ALTERTABLE_FILEFORMAT PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Changing file format (from ORC) is not supported for table default.src_orc diff --git ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out index cd3e168b3d..4a552c1f40 100644 --- ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out +++ ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile PREHOOK: type: ALTERTABLE_FILEFORMAT PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
diff --git ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out
index b50786a64c..8c4af26d34 100644
--- ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out
+++ ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out
@@ -11,4 +11,4 @@ PREHOOK: type: ALTERTABLE_LOCATION
 PREHOOK: Input: default@testwrongloc
 PREHOOK: Output: default@testwrongloc
 #### A masked pattern was here ####
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. {0} is not absolute. Please specify a complete absolute uri. relative/testwrongloc
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. {0} is not absolute. Please specify a complete absolute uri. relative/testwrongloc
diff --git ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out
index 667c7d5d38..1c02789139 100644
--- ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out
+++ ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out
@@ -18,4 +18,4 @@ PREHOOK: query: alter table aa set serdeproperties ("input.regex" = "[^\\](.*)",
 PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
 PREHOOK: Input: default@aa
 PREHOOK: Output: default@aa
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. at least one column must be specified for the table
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. at least one column must be specified for the table
diff --git ql/src/test/results/clientnegative/orc_change_fileformat.q.out ql/src/test/results/clientnegative/orc_change_fileformat.q.out
index db454fe220..f7c5667430 100644
--- ql/src/test/results/clientnegative/orc_change_fileformat.q.out
+++ ql/src/test/results/clientnegative/orc_change_fileformat.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Changing file format (from ORC) is not supported for table default.src_orc
diff --git ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
index cd3e168b3d..4a552c1f40 100644
--- ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
+++ ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Changing file format (from ORC) is not supported for table default.src_orc
diff --git ql/src/test/results/clientnegative/orc_change_serde.q.out ql/src/test/results/clientnegative/orc_change_serde.q.out
index 7f882b520c..88dfe44c3a 100644
--- ql/src/test/results/clientnegative/orc_change_serde.q.out
+++ ql/src/test/results/clientnegative/orc_change_serde.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set serde 'org.apache.hadoop.hive.serde2.col
 PREHOOK: type: ALTERTABLE_SERIALIZER
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
diff --git ql/src/test/results/clientnegative/orc_change_serde_acid.q.out ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
index 612207b936..a1a2805280 100644
--- ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
+++ ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set serde 'org.apache.hadoop.hive.serde2.col
 PREHOOK: type: ALTERTABLE_SERIALIZER
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
diff --git ql/src/test/results/clientnegative/strict_managed_tables6.q.out ql/src/test/results/clientnegative/strict_managed_tables6.q.out
index b6d23d1fac..cf7786e1f5 100644
--- ql/src/test/results/clientnegative/strict_managed_tables6.q.out
+++ ql/src/test/results/clientnegative/strict_managed_tables6.q.out
@@ -27,4 +27,4 @@ PREHOOK: type: ALTERTABLE_LOCATION
 PREHOOK: Input: smt6@strict_managed_tables1_tab1
 #### A masked pattern was here ####
 PREHOOK: Output: smt6@strict_managed_tables1_tab1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Cannot change location of a managed table hive.smt6.strict_managed_tables1_tab1 as it is enabled for replication.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask2. Unable to alter table. Cannot change location of a managed table hive.smt6.strict_managed_tables1_tab1 as it is enabled for replication.
diff --git ql/src/test/results/clientpositive/table_storage.q.out ql/src/test/results/clientpositive/table_storage.q.out
new file mode 100644
index 0000000000..4311350afb
--- /dev/null
+++ ql/src/test/results/clientpositive/table_storage.q.out
@@ -0,0 +1,547 @@
+PREHOOK: query: CREATE TABLE t (key STRING, val STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: CREATE TABLE t (key STRING, val STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Clustered By
+      bucket columns: key
+      number of buckets: 2
+      sort columns: key ASC
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+CLUSTERED BY (
+  key)
+SORTED BY (
+  key ASC)
+INTO 2 BUCKETS
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t INTO 3 BUCKETS
+PREHOOK: type: ALTERTABLE_BUCKETNUM
+PREHOOK: Input: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t INTO 3 BUCKETS
+POSTHOOK: type: ALTERTABLE_BUCKETNUM
+POSTHOOK: Input: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Into Buckets
+      number of buckets: 3
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t INTO 3 BUCKETS
+PREHOOK: type: ALTERTABLE_BUCKETNUM
+PREHOOK: Input: default@t
+POSTHOOK: query: ALTER TABLE t INTO 3 BUCKETS
+POSTHOOK: type: ALTERTABLE_BUCKETNUM
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+CLUSTERED BY (
+  key)
+SORTED BY (
+  key ASC)
+INTO 3 BUCKETS
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t NOT SORTED
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t NOT SORTED
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Not Sorted
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t NOT SORTED
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t NOT SORTED
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+CLUSTERED BY (
+  key)
+INTO 3 BUCKETS
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t NOT CLUSTERED
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t NOT CLUSTERED
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Not Clustered
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t NOT CLUSTERED
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t NOT CLUSTERED
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t SKEWED BY (key) ON (("a"), ("b")) STORED AS DIRECTORIES
+PREHOOK: type: ALTERTABLE_SKEWED
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t SKEWED BY (key) ON (("a"), ("b")) STORED AS DIRECTORIES
+POSTHOOK: type: ALTERTABLE_SKEWED
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Skewed By
+      skewedColumnNames: key
+      skewedColumnValues: [a], [b]
+      table name: default.t
+      isStoredAsDirectories: true
+
+PREHOOK: query: ALTER TABLE t SKEWED BY (key) ON (("a"), ("b")) STORED AS DIRECTORIES
+PREHOOK: type: ALTERTABLE_SKEWED
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t SKEWED BY (key) ON (("a"), ("b")) STORED AS DIRECTORIES
+POSTHOOK: type: ALTERTABLE_SKEWED
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+SKEWED BY (key)
+  ON (('a'),('b'))
+  STORED AS DIRECTORIES
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+#### A masked pattern was here ####
+      table name: default.t
+
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+#### A masked pattern was here ####
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+SKEWED BY (key)
+  ON (('a'),('b'))
+  STORED AS DIRECTORIES
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t NOT SKEWED
+PREHOOK: type: ALTERTABLE_SKEWED
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t NOT SKEWED
+POSTHOOK: type: ALTERTABLE_SKEWED
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Not Skewed
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t NOT SKEWED
+PREHOOK: type: ALTERTABLE_SKEWED
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t NOT SKEWED
+POSTHOOK: type: ALTERTABLE_SKEWED
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t SET FILEFORMAT parquet
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t SET FILEFORMAT parquet
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Set File Format
+      input format: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+      output format: org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat
+      serde name: org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t SET FILEFORMAT parquet
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t SET FILEFORMAT parquet
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_LOCATION
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_LOCATION
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+#### A masked pattern was here ####
+      table name: default.t
+
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_LOCATION
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_LOCATION
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+#### A masked pattern was here ####
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t SET SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t SET SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
+POSTHOOK: type: ALTERTABLE_SERIALIZER
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Set Serde
+      serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t SET SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t SET SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
+POSTHOOK: type: ALTERTABLE_SERIALIZER
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN ALTER TABLE t SET SERDEPROPERTIES('property1'='value1', 'property2'='value2')
+PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: EXPLAIN ALTER TABLE t SET SERDEPROPERTIES('property1'='value1', 'property2'='value2')
+POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Set Serde Props
+      props:
+        property1 value1
+        property2 value2
+      table name: default.t
+
+PREHOOK: query: ALTER TABLE t SET SERDEPROPERTIES('property1'='value1', 'property2'='value2')
+PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: ALTER TABLE t SET SERDEPROPERTIES('property1'='value1', 'property2'='value2')
+POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: SHOW CREATE TABLE t
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: SHOW CREATE TABLE t
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@t
+CREATE TABLE `t`(
+  `key` string,
+  `val` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+WITH SERDEPROPERTIES (
+  'property1'='value1',
+  'property2'='value2')
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+  'bucketing_version'='2',
+#### A masked pattern was here ####
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index 5ef356d02f..e33f5e43ec 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -46,7 +46,7 @@
   // These should remain in sync with AlterTableDesc::AlterTableType enum
   public List<String> allowedAlterTypes = ImmutableList.of("ADDPROPS", "DROPPROPS");
-  String ALTERLOCATION = "ALTERLOCATION";
+  String SET_LOCATION = "SET_LOCATION";
   /**
    * Called before a new table definition is added to the metastore
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 03f136bb60..a2d67da15b 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -63,7 +63,7 @@ import java.util.Map;
 import java.util.Map.Entry;
-import static org.apache.hadoop.hive.metastore.HiveMetaHook.ALTERLOCATION;
+import static org.apache.hadoop.hive.metastore.HiveMetaHook.SET_LOCATION;
 import static org.apache.hadoop.hive.metastore.HiveMetaHook.ALTER_TABLE_OPERATION_TYPE;
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
@@ -825,7 +825,7 @@ private void blockPartitionLocationChangesOnReplSource(Database db, Table tbl,
     // new files list. So, it may cause data inconsistency.
     if (ec.isSetProperties()) {
       String alterType = ec.getProperties().get(ALTER_TABLE_OPERATION_TYPE);
-      if (alterType != null && alterType.equalsIgnoreCase(ALTERLOCATION) &&
+      if (alterType != null && alterType.equalsIgnoreCase(SET_LOCATION) &&
           tbl.getTableType().equalsIgnoreCase(TableType.MANAGED_TABLE.name())) {
         throw new InvalidOperationException("Cannot change location of a managed table " +
             TableName.getQualified(tbl.getCatName(),
@@ -847,12 +847,12 @@ private void validateTableChangesOnReplSource(Database db, Table oldTbl, Table n
     // new files list. So, it may cause data inconsistency. We do this whether or not strict
     // managed is true on the source cluster.
     if (ec.isSetProperties()) {
-      String alterType = ec.getProperties().get(ALTER_TABLE_OPERATION_TYPE);
-      if (alterType != null && alterType.equalsIgnoreCase(ALTERLOCATION) &&
-          oldTbl.getTableType().equalsIgnoreCase(TableType.MANAGED_TABLE.name())) {
-        throw new InvalidOperationException("Cannot change location of a managed table " +
-            TableName.getQualified(oldTbl.getCatName(),
-            oldTbl.getDbName(), oldTbl.getTableName()) + " as it is enabled for replication.");
+      String alterType = ec.getProperties().get(ALTER_TABLE_OPERATION_TYPE);
+      if (alterType != null && alterType.equalsIgnoreCase(SET_LOCATION) &&
+          oldTbl.getTableType().equalsIgnoreCase(TableType.MANAGED_TABLE.name())) {
+        String tableName = TableName.getQualified(oldTbl.getCatName(), oldTbl.getDbName(), oldTbl.getTableName());
+        throw new InvalidOperationException(
+            "Cannot change location of a managed table " + tableName + " as it is enabled for replication.");
       }
     }
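For context, a hedged sketch of how a caller is expected to tag an alter operation so this replication guard can fire; the exact call site lives in the ql alter-table operations and may differ in detail:

    // Assumed illustration: mark the EnvironmentContext with the operation type
    // checked above before invoking the metastore alter call.
    EnvironmentContext ec = new EnvironmentContext();
    ec.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, HiveMetaHook.SET_LOCATION);
    // client.alter_table_with_environmentContext(dbName, tableName, newTable, ec);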