diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java index a377805..e9f3b0d 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java @@ -27,6 +27,8 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.DDLTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -39,7 +41,6 @@ import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.StorageFormat; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hive.hcatalog.common.HCatConstants; import org.apache.hive.hcatalog.common.HCatUtil; @@ -139,8 +140,7 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, // NOT EXISTS return; } - CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1)) - .getWork().getCreateTblDesc(); + CreateTableDesc desc = (CreateTableDesc) ((DDLTask2) rootTasks.get(rootTasks.size() - 1)).getWork().getDDLDesc(); if (desc == null) { // Desc will be null if its CREATE TABLE LIKE. Desc will be // contained in CreateTableLikeDesc. Currently, HCat disallows CTLT in diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index fd159fe..e003452 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -25,6 +25,9 @@ import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTablesDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -38,12 +41,9 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.ErrorType; @@ -297,51 +297,47 @@ protected void 
authorizeDDLWork2(HiveSemanticAnalyzerHookContext cntxt, Hive hiv SwitchDatabaseDesc switchDb = (SwitchDatabaseDesc)ddlDesc; Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName()); authorize(db, Privilege.SELECT); + } else if (ddlDesc instanceof ShowTablesDesc) { + ShowTablesDesc showTables = (ShowTablesDesc)ddlDesc; + String dbName = showTables.getDbName() == null ? SessionState.get().getCurrentDatabase() + : showTables.getDbName(); + authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); + } else if (ddlDesc instanceof DescTableDesc) { + // we should be careful when authorizing table based on just the + // table name. If columns have separate authorization domain, it + // must be honored + DescTableDesc descTable = (DescTableDesc)ddlDesc; + String tableName = extractTableName(descTable.getTableName()); + authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); + } else if (ddlDesc instanceof ShowTableStatusDesc) { + ShowTableStatusDesc showTableStatus = (ShowTableStatusDesc)ddlDesc; + String dbName = showTableStatus.getDbName() == null ? SessionState.get().getCurrentDatabase() + : showTableStatus.getDbName(); + authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); } } @Override protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work) throws HiveException { - // DB opereations, none of them are enforced by Hive right now. - - ShowTablesDesc showTables = work.getShowTblsDesc(); - if (showTables != null) { - String dbName = showTables.getDbName() == null ? SessionState.get().getCurrentDatabase() - : showTables.getDbName(); - authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); - } - - ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc(); - if (showTableStatus != null) { - String dbName = showTableStatus.getDbName() == null ? SessionState.get().getCurrentDatabase() - : showTableStatus.getDbName(); - authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT); - } - // TODO: add alter database support in HCat // Table operations. - DropTableDesc dropTable = work.getDropTblDesc(); - if (dropTable != null) { - if (dropTable.getPartSpecs() == null) { - // drop table is already enforced by Hive. We only check for table level location even if the - // table is partitioned. 
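
For readers following the hook changes above: the new authorizeDDLWork2 receives the generic DDLDesc from DDLWork2 and branches on its concrete type, instead of calling typed getters on the old DDLWork. The stand-alone sketch below illustrates that dispatch style; the desc classes here are stand-ins, not the real Hive classes, and a string result stands in for the authorize() calls.

// Simplified, self-contained sketch of the instanceof-based dispatch used by authorizeDDLWork2.
interface DDLDesc {}

class ShowTablesDesc implements DDLDesc {
  private final String dbName;
  ShowTablesDesc(String dbName) { this.dbName = dbName; }
  String getDbName() { return dbName; }
}

class DescTableDesc implements DDLDesc {
  private final String tableName;
  DescTableDesc(String tableName) { this.tableName = tableName; }
  String getTableName() { return tableName; }
}

public class AuthorizeDispatchSketch {
  // Dispatch on the concrete desc type, defaulting the database when none was given,
  // mirroring the null-check-plus-current-database fallback in the patch.
  static String describeAuthorization(DDLDesc ddlDesc, String currentDb) {
    if (ddlDesc instanceof ShowTablesDesc) {
      ShowTablesDesc showTables = (ShowTablesDesc) ddlDesc;
      String dbName = showTables.getDbName() == null ? currentDb : showTables.getDbName();
      return "SELECT on database " + dbName;
    } else if (ddlDesc instanceof DescTableDesc) {
      DescTableDesc descTable = (DescTableDesc) ddlDesc;
      return "SELECT on table " + descTable.getTableName();
    }
    return "no extra authorization";
  }

  public static void main(String[] args) {
    System.out.println(describeAuthorization(new ShowTablesDesc(null), "default"));
    System.out.println(describeAuthorization(new DescTableDesc("web.logs"), "default"));
  }
}
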
- } else { - //this is actually a ALTER TABLE DROP PARITITION statement - for (DropTableDesc.PartSpec partSpec : dropTable.getPartSpecs()) { - // partitions are not added as write entries in drop partitions in Hive - Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); - List partitions = null; - try { - partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString()); - } catch (Exception e) { - throw new HiveException(e); - } - for (Partition part : partitions) { - authorize(part, Privilege.DROP); - } + DropPartitionDesc dropPartition = work.getDropPartitionDesc(); + if (dropPartition != null) { + //this is actually a ALTER TABLE DROP PARITITION statement + for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()) { + // partitions are not added as write entries in drop partitions in Hive + Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropPartition.getTableName()); + List partitions = null; + try { + partitions = hive.getPartitionsByFilter(table, partSpec.getPartSpec().getExprString()); + } catch (Exception e) { + throw new HiveException(e); + } + for (Partition part : partitions) { + authorize(part, Privilege.DROP); } } } @@ -377,15 +373,6 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive //other alter operations are already supported by Hive } - // we should be careful when authorizing table based on just the - // table name. If columns have separate authorization domain, it - // must be honored - DescTableDesc descTable = work.getDescTblDesc(); - if (descTable != null) { - String tableName = extractTableName(descTable.getTableName()); - authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); - } - ShowPartitionsDesc showParts = work.getShowPartsDesc(); if (showParts != null) { String tableName = extractTableName(showParts.getTabName()); diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java index 3575a16..f988d42 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java @@ -23,14 +23,14 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; public class DummySemanticAnalyzerHook extends AbstractSemanticAnalyzerHook{ @@ -92,7 +92,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, List> rootTasks) throws SemanticException { - CreateTableDesc desc = ((DDLTask)rootTasks.get(rootTasks.size()-1)).getWork().getCreateTblDesc(); + CreateTableDesc desc = (CreateTableDesc) ((DDLTask2)rootTasks.get(rootTasks.size()-1)).getWork().getDDLDesc(); Map tblProps = desc.getTblProps(); if(tblProps == null) { tblProps = new HashMap(); diff --git 
itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java index e20ac64..b2b0072 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java @@ -23,14 +23,14 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; @@ -62,8 +62,7 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, return; } - CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1)).getWork() - .getCreateTblDesc(); + CreateTableDesc desc = (CreateTableDesc) ((DDLTask2) rootTasks.get(rootTasks.size() - 1)).getWork().getDDLDesc(); Map tblProps = desc.getTblProps(); if (tblProps == null) { tblProps = new HashMap(); diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java index e349a0a..136cfee 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java @@ -19,10 +19,30 @@ package org.apache.hadoop.hive.ql.ddl; import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.io.IOUtils; +import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hive.common.util.ReflectionUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +58,7 @@ public DDLOperation(DDLOperationContext context) { this.context = context; } - public abstract int execute() throws HiveException; + public abstract int execute() throws Exception; protected DataOutputStream getOutputStream(Path outputFile) throws HiveException { try { @@ -48,4 +68,141 @@ protected DataOutputStream getOutputStream(Path outputFile) throws HiveException throw new HiveException(e); } } + + /** + * There are many places where "duplicate" Read/WriteEnity objects are added. 
The way this was + * initially implemented, the duplicate just replaced the previous object. + * (work.getOutputs() is a Set and WriteEntity#equals() relies on name) + * This may be benign for ReadEntity and perhaps was benign for WriteEntity before WriteType was + * added. Now that WriteEntity has a WriteType it replaces it with one with possibly different + * {@link org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType}. It's hard to imagine + * how this is desirable. + * + * As of HIVE-14993, WriteEntity with different WriteType must be considered different. + * So WriteEntity created in DDLTask cause extra output in golden files, but only because + * DDLTask sets a different WriteType for the same Entity. + * + * In the spirit of bug-for-bug compatibility, this method ensures we only add new + * WriteEntity if it's really new. + * + * @return {@code true} if item was added + */ + protected boolean addIfAbsentByName(WriteEntity newWriteEntity, Set outputs) { + for(WriteEntity writeEntity : outputs) { + if(writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) { + LOG.debug("Ignoring request to add {} because {} is present", newWriteEntity.toStringDetail(), + writeEntity.toStringDetail()); + return false; + } + } + outputs.add(newWriteEntity); + return true; + } + + protected boolean addIfAbsentByName(WriteEntity newWriteEntity) { + return addIfAbsentByName(newWriteEntity, context.getWork().getOutputs()); + } + + + /** + * Check if the given serde is valid. + */ + protected void validateSerDe(String serdeName) throws HiveException { + validateSerDe(serdeName, context.getConf()); + } + + public static void validateSerDe(String serdeName, HiveConf conf) throws HiveException { + try { + Deserializer d = ReflectionUtil.newInstance(conf.getClassByName(serdeName). + asSubclass(Deserializer.class), conf); + if (d != null) { + LOG.debug("Found class for {}", serdeName); + } + } catch (Exception e) { + throw new HiveException("Cannot validate serde: " + serdeName, e); + } + } + + + /** + * Validate if the given table/partition is eligible for update. + * + * @param db Database. + * @param tableName Table name of format db.table + * @param partSpec Partition spec for the partition + * @param replicationSpec Replications specification + * + * @return boolean true if allow the operation + * @throws HiveException + */ + protected boolean allowOperationInReplicationScope(Hive db, String tableName, Map partSpec, + ReplicationSpec replicationSpec) throws HiveException { + if ((null == replicationSpec) || (!replicationSpec.isInReplicationScope())) { + // Always allow the operation if it is not in replication scope. + return true; + } + // If the table/partition exist and is older than the event, then just apply the event else noop. + Table existingTable = db.getTable(tableName, false); + if ((existingTable != null) && replicationSpec.allowEventReplacementInto(existingTable.getParameters())) { + // Table exists and is older than the update. Now, need to ensure if update allowed on the partition. + if (partSpec != null) { + Partition existingPtn = db.getPartition(existingTable, partSpec, false); + return ((existingPtn != null) && replicationSpec.allowEventReplacementInto(existingPtn.getParameters())); + } + + // Replacement is allowed as the existing table is older than event + return true; + } + + // The table is missing either due to drop/rename which follows the operation. + // Or the existing table is newer than our update. So, don't allow the update. 
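
The allowOperationInReplicationScope javadoc above describes an ordering rule: within replication scope, an event is applied only to an existing table or partition that is older than the event. A minimal sketch of that gate follows; the numeric event-id comparison and the "repl.last.id" parameter name are assumptions standing in for ReplicationSpec.allowEventReplacementInto().

// Self-contained sketch of the replication-scope gate described above.
import java.util.Map;

public class ReplicationGateSketch {
  static boolean allowOperation(boolean inReplicationScope, Map<String, String> existingParams, long eventId) {
    if (!inReplicationScope) {
      return true;               // outside replication scope, always allow
    }
    if (existingParams == null) {
      return false;              // object missing: a drop/rename follows, so skip the event
    }
    String lastReplId = existingParams.get("repl.last.id"); // assumed parameter name
    long existingId = lastReplId == null ? 0L : Long.parseLong(lastReplId);
    return existingId < eventId; // only replace objects older than the event
  }

  public static void main(String[] args) {
    System.out.println(allowOperation(false, null, 10));                        // true
    System.out.println(allowOperation(true, Map.of("repl.last.id", "5"), 10));  // true
    System.out.println(allowOperation(true, Map.of("repl.last.id", "20"), 10)); // false
  }
}
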
+ return false; + } + + protected String propertiesToString(Map props, List exclude) { + String propertyString = ""; + if (!props.isEmpty()) { + Map properties = new TreeMap(props); + List realProps = new ArrayList(); + for (String key : properties.keySet()) { + if (properties.get(key) != null && (exclude == null || !exclude.contains(key))) { + realProps.add(" '" + key + "'='" + HiveStringUtils.escapeHiveCommand(properties.get(key)) + "'"); + } + } + propertyString += StringUtils.join(realProps, ", \n"); + } + return propertyString; + } + + protected void writeToFile(String data, String file) throws IOException { + Path resFile = new Path(file); + FileSystem fs = resFile.getFileSystem(context.getConf()); + FSDataOutputStream out = fs.create(resFile); + try { + if (data != null && !data.isEmpty()) { + OutputStreamWriter writer = new OutputStreamWriter(out, "UTF-8"); + writer.write(data); + writer.write((char) Utilities.newLineCode); + writer.flush(); + } + } finally { + IOUtils.closeStream(out); + } + } + + protected StringBuilder appendNonNull(StringBuilder builder, Object value) { + return appendNonNull(builder, value, false); + } + + protected StringBuilder appendNonNull(StringBuilder builder, Object value, boolean firstColumn) { + if (!firstColumn) { + builder.append((char)Utilities.tabCode); + } else if (builder.length() > 0) { + builder.append((char)Utilities.newLineCode); + } + if (value != null) { + builder.append(value); + } + return builder; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java index 924f0b3..14744d1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperationContext.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.QueryPlan; +import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; @@ -33,12 +35,21 @@ private final HiveConf conf; private final DriverContext driverContext; private final MetaDataFormatter formatter; + private final DDLTask2 task; + private final DDLWork2 work; + private final QueryState queryState; + private final QueryPlan queryPlan; - public DDLOperationContext(HiveConf conf, DriverContext driverContext) throws HiveException { + public DDLOperationContext(HiveConf conf, DriverContext driverContext, DDLTask2 task, DDLWork2 work, + QueryState queryState, QueryPlan queryPlan) throws HiveException { this.db = Hive.get(conf); this.conf = conf; this.driverContext = driverContext; this.formatter = MetaDataFormatUtils.getFormatter(conf); + this.task = task; + this.work = work; + this.queryState = queryState; + this.queryPlan = queryPlan; } public Hive getDb() { @@ -56,4 +67,20 @@ public DriverContext getDriverContext() { public MetaDataFormatter getFormatter() { return formatter; } + + public DDLTask2 getTask() { + return task; + } + + public DDLWork2 getWork() { + return work; + } + + public QueryState getQueryState() { + return queryState; + } + + public QueryPlan getQueryPlan() { + return queryPlan; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java index 068e1e7..9026c3b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask2.java @@ -65,7 +65,8 @@ public int execute(DriverContext driverContext) { DDLDesc ddlDesc = work.getDDLDesc(); if (DESC_TO_OPARATION.containsKey(ddlDesc.getClass())) { - DDLOperationContext context = new DDLOperationContext(conf, driverContext); + DDLOperationContext context = new DDLOperationContext(conf, driverContext, this, (DDLWork2)work, queryState, + queryPlan); Class ddlOpertaionClass = DESC_TO_OPARATION.get(ddlDesc.getClass()); Constructor constructor = ddlOpertaionClass.getConstructor(DDLOperationContext.class, ddlDesc.getClass()); @@ -98,6 +99,11 @@ public String getName() { return "DDL"; } + @Override + public void setException(Throwable ex) { + super.setException(ex); + } + /* uses the authorizer from SessionState will need some more work to get this to run in parallel, however this should not be a bottle neck so might not need to parallelize this. diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java index d2fbe8f..44dfdbb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork2.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.plan.Explain; import java.io.Serializable; @@ -67,6 +68,7 @@ public void setNeedLock(boolean needLock) { this.needLock = needLock; } + @Explain(skipHeader = true) public DDLDesc getDDLDesc() { return ddlDesc; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java new file mode 100644 index 0000000..29dc266 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseDesc.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + + +/** + * DDL task description for SHOW CREATE DATABASE commands. + */ +@Explain(displayName = "Show Create Database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowCreateDatabaseDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(ShowCreateDatabaseDesc.class, ShowCreateDatabaseOperation.class); + } + + private final String resFile; + private final String dbName; + + /** + * Thrift ddl for the result of showcreatedatabase. 
+ */ + public static final String SCHEMA = "createdb_stmt#string"; + + public ShowCreateDatabaseDesc(String dbName, String resFile) { + this.dbName = dbName; + this.resFile = resFile; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDatabaseName() { + return dbName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java new file mode 100644 index 0000000..5196522 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/database/ShowCreateDatabaseOperation.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.database; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.DataOutputStream; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hive.common.util.HiveStringUtils; + +/** + * Operation process showing the creation of a database. 
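
The new operation below assembles a CREATE DATABASE statement and writes it to the result file. Here is a dependency-free sketch of the same string assembly; property escaping and the output-stream handling are omitted, and the values in main are made up.

// Minimal sketch of the statement that ShowCreateDatabaseOperation builds.
import java.util.LinkedHashMap;
import java.util.Map;

public class ShowCreateDatabaseSketch {
  static String showCreateDatabase(String name, String comment, String location, Map<String, String> props) {
    StringBuilder command = new StringBuilder();
    command.append("CREATE DATABASE `").append(name).append("`\n");
    if (comment != null) {
      command.append("COMMENT\n  '").append(comment).append("'\n");
    }
    command.append("LOCATION\n  '").append(location).append("'\n");
    if (props != null && !props.isEmpty()) {
      StringBuilder kv = new StringBuilder();
      for (Map.Entry<String, String> e : props.entrySet()) {
        if (kv.length() > 0) {
          kv.append(",\n");
        }
        kv.append("  '").append(e.getKey()).append("'='").append(e.getValue()).append("'");
      }
      command.append("WITH DBPROPERTIES (\n").append(kv).append(")\n");
    }
    return command.toString();
  }

  public static void main(String[] args) {
    Map<String, String> props = new LinkedHashMap<>();
    props.put("owner", "etl");
    System.out.print(showCreateDatabase("sales", "nightly loads", "hdfs:///warehouse/sales.db", props));
  }
}
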
+ */ +public class ShowCreateDatabaseOperation extends DDLOperation { + private final ShowCreateDatabaseDesc desc; + + public ShowCreateDatabaseOperation(DDLOperationContext context, ShowCreateDatabaseDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + DataOutputStream outStream = getOutputStream(new Path(desc.getResFile())); + try { + return showCreateDatabase(outStream); + } catch (Exception e) { + throw new HiveException(e); + } finally { + IOUtils.closeStream(outStream); + } + } + + private int showCreateDatabase(DataOutputStream outStream) throws Exception { + Database database = context.getDb().getDatabase(desc.getDatabaseName()); + + StringBuilder createDbCommand = new StringBuilder(); + createDbCommand.append("CREATE DATABASE `").append(database.getName()).append("`\n"); + if (database.getDescription() != null) { + createDbCommand.append("COMMENT\n '"); + createDbCommand.append(HiveStringUtils.escapeHiveCommand(database.getDescription())).append("'\n"); + } + createDbCommand.append("LOCATION\n '"); + createDbCommand.append(database.getLocationUri()).append("'\n"); + String propertiesToString = propertiesToString(database.getParameters(), null); + if (!propertiesToString.isEmpty()) { + createDbCommand.append("WITH DBPROPERTIES (\n"); + createDbCommand.append(propertiesToString).append(")\n"); + } + + outStream.write(createDbCommand.toString().getBytes("UTF-8")); + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java new file mode 100644 index 0000000..13e7dbc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableDesc.java @@ -0,0 +1,963 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.PartitionManagementTask; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; +import org.apache.hadoop.hive.ql.io.HiveOutputFormat; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.PlanUtils; +import org.apache.hadoop.hive.ql.plan.ValidationUtility; +import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.mapred.OutputFormat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * DDL task description for CREATE TABLE commands. 
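
Like the other new desc classes in this patch, CreateTableDesc registers its operation class from a static initializer, and DDLTask2 later instantiates the operation by looking it up by desc type (the DESC_TO_OPARATION map seen earlier in the diff). The self-contained sketch below shows that registry pattern in isolation; the map and types here are simplifications, not the real Hive classes.

// Stand-alone sketch of the desc-to-operation registration used by the new DDL framework.
import java.util.HashMap;
import java.util.Map;

public class RegistrySketch {
  interface Desc {}
  interface Operation { int execute() throws Exception; }

  static final Map<Class<? extends Desc>, Class<? extends Operation>> DESC_TO_OPERATION = new HashMap<>();

  static void registerOperation(Class<? extends Desc> desc, Class<? extends Operation> op) {
    DESC_TO_OPERATION.put(desc, op);
  }

  static class CreateTableDesc implements Desc {
    // The desc class registers its own operation when it is first loaded.
    static { registerOperation(CreateTableDesc.class, CreateTableOperation.class); }
  }

  static class CreateTableOperation implements Operation {
    public int execute() { System.out.println("creating table"); return 0; }
  }

  public static void main(String[] args) throws Exception {
    Desc desc = new CreateTableDesc();  // loading the desc class runs its static block
    Class<? extends Operation> opClass = DESC_TO_OPERATION.get(desc.getClass());
    opClass.getDeclaredConstructor().newInstance().execute();
  }
}
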
+ */ +@Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateTableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + private static final Logger LOG = LoggerFactory.getLogger(CreateTableDesc.class); + + static { + DDLTask2.registerOperation(CreateTableDesc.class, CreateTableOperation.class); + } + + String databaseName; + String tableName; + boolean isExternal; + List cols; + List partCols; + List partColNames; + List bucketCols; + List sortCols; + int numBuckets; + String fieldDelim; + String fieldEscape; + String collItemDelim; + String mapKeyDelim; + String lineDelim; + String nullFormat; + String comment; + String inputFormat; + String outputFormat; + String location; + String serName; + String storageHandler; + Map serdeProps; + Map tblProps; + boolean ifNotExists; + List skewedColNames; + List> skewedColValues; + boolean isStoredAsSubDirectories = false; + boolean isTemporary = false; + private boolean isMaterialization = false; + private boolean replaceMode = false; + private ReplicationSpec replicationSpec = null; + private boolean isCTAS = false; + List primaryKeys; + List foreignKeys; + List uniqueConstraints; + List notNullConstraints; + List defaultConstraints; + List checkConstraints; + private ColumnStatistics colStats; + private Long initialMmWriteId; // Initial MM write ID for CTAS and import. + // The FSOP configuration for the FSOP that is going to write initial data during ctas. + // This is not needed beyond compilation, so it is transient. + private transient FileSinkDesc writer; + private Long replWriteId; // to be used by repl task to get the txn and valid write id list + private String ownerName = null; + + public CreateTableDesc() { + } + + public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, + List cols, List partCols, + List bucketCols, List sortCols, int numBuckets, + String fieldDelim, String fieldEscape, String collItemDelim, + String mapKeyDelim, String lineDelim, String comment, String inputFormat, + String outputFormat, String location, String serName, + String storageHandler, + Map serdeProps, + Map tblProps, + boolean ifNotExists, List skewedColNames, List> skewedColValues, + List primaryKeys, List foreignKeys, + List uniqueConstraints, List notNullConstraints, + List defaultConstraints, List checkConstraints, + ColumnStatistics colStats) { + + this(tableName, isExternal, isTemporary, cols, partCols, + bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, + collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, + outputFormat, location, serName, storageHandler, serdeProps, + tblProps, ifNotExists, skewedColNames, skewedColValues, + primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + + this.databaseName = databaseName; + this.colStats = colStats; + } + + public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, + List cols, List partColNames, + List bucketCols, List sortCols, int numBuckets, + String fieldDelim, String fieldEscape, String collItemDelim, + String mapKeyDelim, String lineDelim, String comment, String inputFormat, + String outputFormat, String location, String serName, + String storageHandler, + Map serdeProps, + Map tblProps, + boolean ifNotExists, List skewedColNames, List> skewedColValues, + boolean isCTAS, List primaryKeys, List foreignKeys, + List uniqueConstraints, List 
notNullConstraints, + List defaultConstraints, List checkConstraints) { + this(databaseName, tableName, isExternal, isTemporary, cols, new ArrayList<>(), + bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, + collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, + outputFormat, location, serName, storageHandler, serdeProps, + tblProps, ifNotExists, skewedColNames, skewedColValues, + primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, + null); + this.partColNames = partColNames; + this.isCTAS = isCTAS; + } + + public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary, + List cols, List partCols, + List bucketCols, List sortCols, int numBuckets, + String fieldDelim, String fieldEscape, String collItemDelim, + String mapKeyDelim, String lineDelim, String comment, String inputFormat, + String outputFormat, String location, String serName, + String storageHandler, + Map serdeProps, + Map tblProps, + boolean ifNotExists, List skewedColNames, List> skewedColValues, + List primaryKeys, List foreignKeys, + List uniqueConstraints, List notNullConstraints, + List defaultConstraints, List checkConstraints) { + this.tableName = tableName; + this.isExternal = isExternal; + this.isTemporary = isTemporary; + this.bucketCols = new ArrayList(bucketCols); + this.sortCols = new ArrayList(sortCols); + this.collItemDelim = collItemDelim; + this.cols = new ArrayList(cols); + this.comment = comment; + this.fieldDelim = fieldDelim; + this.fieldEscape = fieldEscape; + this.inputFormat = inputFormat; + this.outputFormat = outputFormat; + this.lineDelim = lineDelim; + this.location = location; + this.mapKeyDelim = mapKeyDelim; + this.numBuckets = numBuckets; + this.partCols = new ArrayList(partCols); + this.serName = serName; + this.storageHandler = storageHandler; + this.serdeProps = serdeProps; + this.tblProps = tblProps; + this.ifNotExists = ifNotExists; + this.skewedColNames = copyList(skewedColNames); + this.skewedColValues = copyList(skewedColValues); + this.primaryKeys = copyList(primaryKeys); + this.foreignKeys = copyList(foreignKeys); + this.uniqueConstraints = copyList(uniqueConstraints); + this.notNullConstraints = copyList(notNullConstraints); + this.defaultConstraints = copyList(defaultConstraints); + this.checkConstraints= copyList(checkConstraints); + } + + private static List copyList(List copy) { + return copy == null ? 
null : new ArrayList(copy); + } + + @Explain(displayName = "columns") + public List getColsString() { + return Utilities.getFieldSchemaString(getCols()); + } + + @Explain(displayName = "partition columns") + public List getPartColsString() { + return Utilities.getFieldSchemaString(getPartCols()); + } + + @Explain(displayName = "if not exists", displayOnlyOnTrue = true) + public boolean getIfNotExists() { + return ifNotExists; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + public String getDatabaseName(){ + return databaseName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public List getCols() { + return cols; + } + + public void setCols(ArrayList cols) { + this.cols = cols; + } + + public List getPartCols() { + return partCols; + } + + public void setPartCols(ArrayList partCols) { + this.partCols = partCols; + } + + public List getPartColNames() { + return partColNames; + } + + public void setPartColNames(ArrayList partColNames) { + this.partColNames = partColNames; + } + + public List getPrimaryKeys() { + return primaryKeys; + } + + public void setPrimaryKeys(ArrayList primaryKeys) { + this.primaryKeys = primaryKeys; + } + + public List getForeignKeys() { + return foreignKeys; + } + + public void setForeignKeys(ArrayList foreignKeys) { + this.foreignKeys = foreignKeys; + } + + public List getUniqueConstraints() { + return uniqueConstraints; + } + + public List getNotNullConstraints() { + return notNullConstraints; + } + + public List getDefaultConstraints() { + return defaultConstraints; + } + + public List getCheckConstraints() { + return checkConstraints; + } + + @Explain(displayName = "bucket columns") + public List getBucketCols() { + return bucketCols; + } + + public void setBucketCols(ArrayList bucketCols) { + this.bucketCols = bucketCols; + } + + @Explain(displayName = "# buckets") + public Integer getNumBucketsExplain() { + if (numBuckets == -1) { + return null; + } else { + return numBuckets; + } + } + + public int getNumBuckets() { + return numBuckets; + } + + public void setNumBuckets(int numBuckets) { + this.numBuckets = numBuckets; + } + + @Explain(displayName = "field delimiter") + public String getFieldDelim() { + return fieldDelim; + } + + public void setFieldDelim(String fieldDelim) { + this.fieldDelim = fieldDelim; + } + + @Explain(displayName = "field escape") + public String getFieldEscape() { + return fieldEscape; + } + + public void setFieldEscape(String fieldEscape) { + this.fieldEscape = fieldEscape; + } + + @Explain(displayName = "collection delimiter") + public String getCollItemDelim() { + return collItemDelim; + } + + public void setCollItemDelim(String collItemDelim) { + this.collItemDelim = collItemDelim; + } + + @Explain(displayName = "map key delimiter") + public String getMapKeyDelim() { + return mapKeyDelim; + } + + public void setMapKeyDelim(String mapKeyDelim) { + this.mapKeyDelim = mapKeyDelim; + } + + @Explain(displayName = "line delimiter") + public String getLineDelim() { + return lineDelim; + } + + public void setLineDelim(String lineDelim) { + this.lineDelim = lineDelim; + } + + @Explain(displayName = "comment") + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Explain(displayName = "input format") + public String 
getInputFormat() { + return inputFormat; + } + + public void setInputFormat(String inputFormat) { + this.inputFormat = inputFormat; + } + + @Explain(displayName = "output format") + public String getOutputFormat() { + return outputFormat; + } + + public void setOutputFormat(String outputFormat) { + this.outputFormat = outputFormat; + } + + @Explain(displayName = "storage handler") + public String getStorageHandler() { + return storageHandler; + } + + public void setStorageHandler(String storageHandler) { + this.storageHandler = storageHandler; + } + + @Explain(displayName = "location") + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + + @Explain(displayName = "isExternal", displayOnlyOnTrue = true) + public boolean isExternal() { + return isExternal; + } + + public void setExternal(boolean isExternal) { + this.isExternal = isExternal; + } + + /** + * @return the sortCols + */ + @Explain(displayName = "sort columns") + public List getSortCols() { + return sortCols; + } + + /** + * @param sortCols + * the sortCols to set + */ + public void setSortCols(ArrayList sortCols) { + this.sortCols = sortCols; + } + + /** + * @return the serDeName + */ + @Explain(displayName = "serde name") + public String getSerName() { + return serName; + } + + /** + * @param serName + * the serName to set + */ + public void setSerName(String serName) { + this.serName = serName; + } + + /** + * @return the serDe properties + */ + @Explain(displayName = "serde properties") + public Map getSerdeProps() { + return serdeProps; + } + + /** + * @param serdeProps + * the serde properties to set + */ + public void setSerdeProps(Map serdeProps) { + this.serdeProps = serdeProps; + } + + /** + * @return the table properties + */ + @Explain(displayName = "table properties") + public Map getTblProps() { + return tblProps; + } + + /** + * @param tblProps + * the table properties to set + */ + public void setTblProps(Map tblProps) { + this.tblProps = tblProps; + } + + /** + * @return the skewedColNames + */ + public List getSkewedColNames() { + return skewedColNames; + } + + /** + * @param skewedColNames the skewedColNames to set + */ + public void setSkewedColNames(ArrayList skewedColNames) { + this.skewedColNames = skewedColNames; + } + + /** + * @return the skewedColValues + */ + public List> getSkewedColValues() { + return skewedColValues; + } + + /** + * @param skewedColValues the skewedColValues to set + */ + public void setSkewedColValues(ArrayList> skewedColValues) { + this.skewedColValues = skewedColValues; + } + + public void validate(HiveConf conf) + throws SemanticException { + + if ((this.getCols() == null) || (this.getCols().size() == 0)) { + // for now make sure that serde exists + if (Table.hasMetastoreBasedSchema(conf, serName) && + StringUtils.isEmpty(getStorageHandler())) { + throw new SemanticException(ErrorMsg.INVALID_TBL_DDL_SERDE.getMsg()); + } + return; + } + + if (this.getStorageHandler() == null) { + try { + Class origin = Class.forName(this.getOutputFormat(), true, Utilities.getSessionSpecifiedClassLoader()); + Class replaced = HiveFileFormatUtils.getOutputFormatSubstitute(origin); + if (!HiveOutputFormat.class.isAssignableFrom(replaced)) { + throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg()); + } + } catch (ClassNotFoundException e) { + throw new SemanticException(ErrorMsg.CLASSPATH_ERROR.getMsg(), e); + } + } + + List colNames = ParseUtils.validateColumnNameUniqueness(this.getCols()); + + if 
(this.getBucketCols() != null) { + // all columns in cluster and sort are valid columns + Iterator bucketCols = this.getBucketCols().iterator(); + while (bucketCols.hasNext()) { + String bucketCol = bucketCols.next(); + boolean found = false; + Iterator colNamesIter = colNames.iterator(); + while (colNamesIter.hasNext()) { + String colName = colNamesIter.next(); + if (bucketCol.equalsIgnoreCase(colName)) { + found = true; + break; + } + } + if (!found) { + throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(" \'" + bucketCol + "\'")); + } + } + } + + if (this.getSortCols() != null) { + // all columns in cluster and sort are valid columns + Iterator sortCols = this.getSortCols().iterator(); + while (sortCols.hasNext()) { + String sortCol = sortCols.next().getCol(); + boolean found = false; + Iterator colNamesIter = colNames.iterator(); + while (colNamesIter.hasNext()) { + String colName = colNamesIter.next(); + if (sortCol.equalsIgnoreCase(colName)) { + found = true; + break; + } + } + if (!found) { + throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(" \'" + sortCol + "\'")); + } + } + } + + if (this.getPartCols() != null) { + // there is no overlap between columns and partitioning columns + Iterator partColsIter = this.getPartCols().iterator(); + while (partColsIter.hasNext()) { + FieldSchema fs = partColsIter.next(); + String partCol = fs.getName(); + TypeInfo pti = null; + try { + pti = TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()); + } catch (Exception err) { + LOG.error("Failed to get type info", err); + } + if(null == pti){ + throw new SemanticException(ErrorMsg.PARTITION_COLUMN_NON_PRIMITIVE.getMsg() + " Found " + + partCol + " of type: " + fs.getType()); + } + Iterator colNamesIter = colNames.iterator(); + while (colNamesIter.hasNext()) { + String colName = BaseSemanticAnalyzer.unescapeIdentifier(colNamesIter.next()); + if (partCol.equalsIgnoreCase(colName)) { + throw new SemanticException( + ErrorMsg.COLUMN_REPEATED_IN_PARTITIONING_COLS.getMsg()); + } + } + } + } + + /* Validate skewed information. */ + ValidationUtility.validateSkewedInformation(colNames, this.getSkewedColNames(), + this.getSkewedColValues()); + } + + /** + * @return the isStoredAsSubDirectories + */ + public boolean isStoredAsSubDirectories() { + return isStoredAsSubDirectories; + } + + /** + * @param isStoredAsSubDirectories the isStoredAsSubDirectories to set + */ + public void setStoredAsSubDirectories(boolean isStoredAsSubDirectories) { + this.isStoredAsSubDirectories = isStoredAsSubDirectories; + } + + /** + * @return the nullFormat + */ + public String getNullFormat() { + return nullFormat; + } + + /** + * Set null format string + * @param nullFormat + */ + public void setNullFormat(String nullFormat) { + this.nullFormat = nullFormat; + } + + /** + * @return the isTemporary + */ + @Explain(displayName = "isTemporary", displayOnlyOnTrue = true) + public boolean isTemporary() { + return isTemporary; + } + + /** + * @param isTemporary table is Temporary or not. + */ + public void setTemporary(boolean isTemporary) { + this.isTemporary = isTemporary; + } + + /** + * @return the isMaterialization + */ + @Explain(displayName = "isMaterialization", displayOnlyOnTrue = true) + public boolean isMaterialization() { + return isMaterialization; + } + + /** + * @param isMaterialization table is a materialization or not. 
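
The validate() method above checks, case-insensitively, that bucket and sort columns name real columns and that partition columns do not repeat data columns. A compact, dependency-free restatement of those checks follows, with error reporting reduced to booleans and plain string lists in place of FieldSchema.

// Compact restatement of the column checks performed by CreateTableDesc.validate().
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.stream.Collectors;

public class ColumnValidationSketch {
  static Set<String> lower(List<String> names) {
    return names.stream().map(n -> n.toLowerCase(Locale.ROOT)).collect(Collectors.toSet());
  }

  static boolean bucketAndSortColsExist(List<String> cols, List<String> bucketCols, List<String> sortCols) {
    Set<String> known = lower(cols);
    return known.containsAll(lower(bucketCols)) && known.containsAll(lower(sortCols));
  }

  static boolean partColsDisjoint(List<String> cols, List<String> partCols) {
    Set<String> known = lower(cols);
    return lower(partCols).stream().noneMatch(known::contains);
  }

  public static void main(String[] args) {
    List<String> cols = List.of("id", "name", "ts");
    System.out.println(bucketAndSortColsExist(cols, List.of("ID"), List.of("ts"))); // true
    System.out.println(partColsDisjoint(cols, List.of("dt")));                      // true
    System.out.println(partColsDisjoint(cols, List.of("Name")));                    // false
  }
}
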
+ */ + public void setMaterialization(boolean isMaterialization) { + this.isMaterialization = isMaterialization; + } + + /** + * @param replaceMode Determine if this CreateTable should behave like a replace-into alter instead + */ + public void setReplaceMode(boolean replaceMode) { + this.replaceMode = replaceMode; + } + + /** + * @return true if this CreateTable should behave like a replace-into alter instead + */ + public boolean getReplaceMode() { + return replaceMode; + } + + /** + * @param replicationSpec Sets the replication spec governing this create. + * This parameter will have meaningful values only for creates happening as a result of a replication. + */ + public void setReplicationSpec(ReplicationSpec replicationSpec) { + this.replicationSpec = replicationSpec; + } + + /** + * @return what kind of replication scope this drop is running under. + * This can result in a "CREATE/REPLACE IF NEWER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec(){ + if (replicationSpec == null){ + this.replicationSpec = new ReplicationSpec(); + } + return this.replicationSpec; + } + + public boolean isCTAS() { + return isCTAS; + } + + public Table toTable(HiveConf conf) throws HiveException { + String databaseName = getDatabaseName(); + String tableName = getTableName(); + + if (databaseName == null || tableName.contains(".")) { + String[] names = Utilities.getDbTableName(tableName); + databaseName = names[0]; + tableName = names[1]; + } + + Table tbl = new Table(databaseName, tableName); + + if (getTblProps() != null) { + tbl.getTTable().getParameters().putAll(getTblProps()); + } + + if (getPartCols() != null) { + tbl.setPartCols(getPartCols()); + } + + if (getNumBuckets() != -1) { + tbl.setNumBuckets(getNumBuckets()); + } + + if (getStorageHandler() != null) { + tbl.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, + getStorageHandler()); + } + HiveStorageHandler storageHandler = tbl.getStorageHandler(); + + /* + * If the user didn't specify a SerDe, we use the default. 
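
toTable() resolves the SerDe in three steps: an explicitly named SerDe wins, otherwise the storage handler supplies one, otherwise the configured default is used (LazySimpleSerDe in the common case). A small sketch of that precedence; the default below is hard-coded rather than read from PlanUtils.getDefaultSerDe(), and the com.example names are hypothetical.

// Sketch of the SerDe precedence applied when building the Table object.
import java.util.Optional;

public class SerDePrecedenceSketch {
  static final String DEFAULT_SERDE = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";

  static String chooseSerDe(String explicitSerDe, Optional<String> storageHandlerSerDe) {
    if (explicitSerDe != null) {
      return explicitSerDe;                           // user-specified SerDe always wins
    }
    return storageHandlerSerDe.orElse(DEFAULT_SERDE); // handler-supplied, else the default
  }

  public static void main(String[] args) {
    System.out.println(chooseSerDe(null, Optional.empty()));
    System.out.println(chooseSerDe(null, Optional.of("com.example.HandlerSerDe"))); // hypothetical name
    System.out.println(chooseSerDe("com.example.CustomSerDe", Optional.empty()));   // hypothetical name
  }
}
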
+ */ + String serDeClassName; + if (getSerName() == null) { + if (storageHandler == null) { + serDeClassName = PlanUtils.getDefaultSerDe().getName(); + LOG.info("Default to " + serDeClassName + " for table " + tableName); + } else { + serDeClassName = storageHandler.getSerDeClass().getName(); + LOG.info("Use StorageHandler-supplied " + serDeClassName + + " for table " + tableName); + } + } else { + // let's validate that the serde exists + serDeClassName = getSerName(); + DDLOperation.validateSerDe(serDeClassName, conf); + } + tbl.setSerializationLib(serDeClassName); + + if (getFieldDelim() != null) { + tbl.setSerdeParam(serdeConstants.FIELD_DELIM, getFieldDelim()); + tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT, getFieldDelim()); + } + if (getFieldEscape() != null) { + tbl.setSerdeParam(serdeConstants.ESCAPE_CHAR, getFieldEscape()); + } + + if (getCollItemDelim() != null) { + tbl.setSerdeParam(serdeConstants.COLLECTION_DELIM, getCollItemDelim()); + } + if (getMapKeyDelim() != null) { + tbl.setSerdeParam(serdeConstants.MAPKEY_DELIM, getMapKeyDelim()); + } + if (getLineDelim() != null) { + tbl.setSerdeParam(serdeConstants.LINE_DELIM, getLineDelim()); + } + if (getNullFormat() != null) { + tbl.setSerdeParam(serdeConstants.SERIALIZATION_NULL_FORMAT, getNullFormat()); + } + if (getSerdeProps() != null) { + Iterator> iter = getSerdeProps().entrySet() + .iterator(); + while (iter.hasNext()) { + Map.Entry m = iter.next(); + tbl.setSerdeParam(m.getKey(), m.getValue()); + } + } + + if (getCols() != null) { + tbl.setFields(getCols()); + } + if (getBucketCols() != null) { + tbl.setBucketCols(getBucketCols()); + } + if (getSortCols() != null) { + tbl.setSortCols(getSortCols()); + } + if (getComment() != null) { + tbl.setProperty("comment", getComment()); + } + if (getLocation() != null) { + tbl.setDataLocation(new Path(getLocation())); + } + + if (getSkewedColNames() != null) { + tbl.setSkewedColNames(getSkewedColNames()); + } + if (getSkewedColValues() != null) { + tbl.setSkewedColValues(getSkewedColValues()); + } + + tbl.getTTable().setTemporary(isTemporary()); + + tbl.setStoredAsSubDirectories(isStoredAsSubDirectories()); + + tbl.setInputFormatClass(getInputFormat()); + tbl.setOutputFormatClass(getOutputFormat()); + + // only persist input/output format to metadata when it is explicitly specified. + // Otherwise, load lazily via StorageHandler at query time. + if (getInputFormat() != null && !getInputFormat().isEmpty()) { + tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName()); + } + if (getOutputFormat() != null && !getOutputFormat().isEmpty()) { + tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName()); + } + + if (CreateTableOperation.doesTableNeedLocation(tbl)) { + // If location is specified - ensure that it is a full qualified name + CreateTableOperation.makeLocationQualified(tbl, conf); + } + + if (isExternal()) { + tbl.setProperty("EXTERNAL", "TRUE"); + tbl.setTableType(TableType.EXTERNAL_TABLE); + // only add if user have not explicit set it (user explicitly disabled for example in which case don't flip it) + if (tbl.getProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY) == null) { + // partition discovery is on by default if undefined + tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); + } + } + + // If the sorted columns is a superset of bucketed columns, store this fact. + // It can be later used to + // optimize some group-by queries. 
Note that, the order does not matter as + // long as it in the first + // 'n' columns where 'n' is the length of the bucketed columns. + if ((tbl.getBucketCols() != null) && (tbl.getSortCols() != null)) { + List bucketCols = tbl.getBucketCols(); + List sortCols = tbl.getSortCols(); + + if ((sortCols.size() > 0) && (sortCols.size() >= bucketCols.size())) { + boolean found = true; + + Iterator iterBucketCols = bucketCols.iterator(); + while (iterBucketCols.hasNext()) { + String bucketCol = iterBucketCols.next(); + boolean colFound = false; + for (int i = 0; i < bucketCols.size(); i++) { + if (bucketCol.equals(sortCols.get(i).getCol())) { + colFound = true; + break; + } + } + if (colFound == false) { + found = false; + break; + } + } + if (found) { + tbl.setProperty("SORTBUCKETCOLSPREFIX", "TRUE"); + } + } + } + + if (colStats != null) { + ColumnStatisticsDesc colStatsDesc = new ColumnStatisticsDesc(colStats.getStatsDesc()); + colStatsDesc.setCatName(tbl.getCatName()); + colStatsDesc.setDbName(getTableName()); + colStatsDesc.setDbName(getDatabaseName()); + tbl.getTTable().setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj())); + } + + // The statistics for non-transactional tables will be obtained from the source. Do not + // reset those on replica. + if (replicationSpec != null && replicationSpec.isInReplicationScope() && + !TxnUtils.isTransactionalTable(tbl.getTTable())) { + // Do nothing to the table statistics. + } else { + if (!this.isCTAS && (tbl.getPath() == null || (tbl.isEmpty() && !isExternal()))) { + if (!tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), + MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); + } + } else { + StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), null, + StatsSetupConst.FALSE); + } + } + + if (ownerName != null) { + tbl.setOwner(ownerName); + } + return tbl; + } + + public void setInitialMmWriteId(Long mmWriteId) { + this.initialMmWriteId = mmWriteId; + } + + public Long getInitialMmWriteId() { + return initialMmWriteId; + } + + + + public FileSinkDesc getAndUnsetWriter() { + FileSinkDesc fsd = writer; + writer = null; + return fsd; + } + + public void setWriter(FileSinkDesc writer) { + this.writer = writer; + } + + public Long getReplWriteId() { + return replWriteId; + } + + public void setReplWriteId(Long replWriteId) { + this.replWriteId = replWriteId; + } + + public String getOwnerName() { + return ownerName; + } + + public void setOwnerName(String ownerName) { + this.ownerName = ownerName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java new file mode 100644 index 0000000..6652b79 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeDesc.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for CREATE TABLE LIKE commands. + */ +@Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateTableLikeDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(CreateTableLikeDesc.class, CreateTableLikeOperation.class); + } + + private final String tableName; + private final boolean isExternal; + private final boolean isTemporary; + private final String defaultInputFormat; + private final String defaultOutputFormat; + private final String location; + private final String defaultSerName; + private final Map defaultSerdeProps; + private final Map tblProps; + private final boolean ifNotExists; + private final String likeTableName; + private final boolean isUserStorageFormat; + + public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary, String defaultInputFormat, + String defaultOutputFormat, String location, String defaultSerName, Map defaultSerdeProps, + Map tblProps, boolean ifNotExists, String likeTableName, boolean isUserStorageFormat) { + this.tableName = tableName; + this.isExternal = isExternal; + this.isTemporary = isTemporary; + this.defaultInputFormat = defaultInputFormat; + this.defaultOutputFormat = defaultOutputFormat; + this.defaultSerName = defaultSerName; + this.defaultSerdeProps = defaultSerdeProps; + this.location = location; + this.tblProps = tblProps; + this.ifNotExists = ifNotExists; + this.likeTableName = likeTableName; + this.isUserStorageFormat = isUserStorageFormat; + } + + @Explain(displayName = "if not exists", displayOnlyOnTrue = true) + public boolean getIfNotExists() { + return ifNotExists; + } + + @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "default input format") + public String getDefaultInputFormat() { + return defaultInputFormat; + } + + @Explain(displayName = "default output format") + public String getDefaultOutputFormat() { + return defaultOutputFormat; + } + + @Explain(displayName = "location") + public String getLocation() { + return location; + } + + @Explain(displayName = "isExternal", displayOnlyOnTrue = true) + public boolean isExternal() { + return isExternal; + } + + @Explain(displayName = "default serde name") + public String getDefaultSerName() { + return defaultSerName; + } + + @Explain(displayName = "serde properties") + public Map getDefaultSerdeProps() { + return defaultSerdeProps; + } + + @Explain(displayName = "like") + public String getLikeTableName() { + return likeTableName; + } + + @Explain(displayName = "table properties") + public Map getTblProps() { + return tblProps; + } + + 
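
For orientation, this is roughly how the new CreateTableLikeDesc would be constructed for a statement such as CREATE TEMPORARY TABLE tmp_events LIKE default.events, using only the constructor and getters shown in this file. It assumes the patched ql classes are on the classpath; all values are illustrative.

// Illustrative construction of the patch's CreateTableLikeDesc.
import java.util.Collections;

import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc;

public class CreateTableLikeDescExample {
  public static void main(String[] args) {
    CreateTableLikeDesc desc = new CreateTableLikeDesc(
        "default.tmp_events",        // tableName
        false,                       // isExternal
        true,                        // isTemporary
        null,                        // defaultInputFormat (let the source table decide)
        null,                        // defaultOutputFormat
        null,                        // location
        null,                        // defaultSerName
        Collections.emptyMap(),      // defaultSerdeProps
        Collections.emptyMap(),      // tblProps
        true,                        // ifNotExists
        "default.events",            // likeTableName
        false);                      // isUserStorageFormat
    System.out.println(desc.getTableName() + " LIKE " + desc.getLikeTableName());
  }
}
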
@Explain(displayName = "isTemporary", displayOnlyOnTrue = true) + public boolean isTemporary() { + return isTemporary; + } + + public boolean isUserStorageFormat() { + return this.isUserStorageFormat; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java new file mode 100644 index 0000000..0e8813c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java @@ -0,0 +1,210 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.PartitionManagementTask; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDeSpec; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hive.common.util.AnnotationUtils; + +/** + * Operation process of creating a table like an existing one. 
+ */ +public class CreateTableLikeOperation extends DDLOperation { + private final CreateTableLikeDesc desc; + + public CreateTableLikeOperation(DDLOperationContext context, CreateTableLikeDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // Get the existing table + Table oldtbl = context.getDb().getTable(desc.getLikeTableName()); + Table tbl; + if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW || oldtbl.getTableType() == TableType.MATERIALIZED_VIEW) { + tbl = createViewLikeTable(oldtbl); + } else { + tbl = createTableLikeTable(oldtbl); + } + + // If location is specified - ensure that it is a full qualified name + if (CreateTableOperation.doesTableNeedLocation(tbl)) { + CreateTableOperation.makeLocationQualified(tbl, context.getConf()); + } + + if (desc.getLocation() == null && !tbl.isPartitioned() && + context.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), + MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); + } + + // create the table + context.getDb().createTable(tbl, desc.getIfNotExists()); + addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); + return 0; + } + + private Table createViewLikeTable(Table oldtbl) throws HiveException { + Table tbl; + String targetTableName = desc.getTableName(); + tbl = context.getDb().newTable(targetTableName); + + if (desc.getTblProps() != null) { + tbl.getTTable().getParameters().putAll(desc.getTblProps()); + } + + tbl.setTableType(TableType.MANAGED_TABLE); + + if (desc.isExternal()) { + tbl.setProperty("EXTERNAL", "TRUE"); + tbl.setTableType(TableType.EXTERNAL_TABLE); + // partition discovery is on by default + tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); + } + + tbl.setFields(oldtbl.getCols()); + tbl.setPartCols(oldtbl.getPartCols()); + + if (desc.getDefaultSerName() == null) { + LOG.info("Default to LazySimpleSerDe for table {}", targetTableName); + tbl.setSerializationLib(LazySimpleSerDe.class.getName()); + } else { + // let's validate that the serde exists + validateSerDe(desc.getDefaultSerName()); + tbl.setSerializationLib(desc.getDefaultSerName()); + } + + if (desc.getDefaultSerdeProps() != null) { + for (Map.Entry e : desc.getDefaultSerdeProps().entrySet()) { + tbl.setSerdeParam(e.getKey(), e.getValue()); + } + } + + tbl.setInputFormatClass(desc.getDefaultInputFormat()); + tbl.setOutputFormatClass(desc.getDefaultOutputFormat()); + tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName()); + tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName()); + + return tbl; + } + + private Table createTableLikeTable(Table oldtbl) throws SemanticException, HiveException { + Table tbl = oldtbl; + + // find out database name and table name of target table + String targetTableName = desc.getTableName(); + String[] names = Utilities.getDbTableName(targetTableName); + + tbl.setDbName(names[0]); + tbl.setTableName(names[1]); + + // using old table object, hence reset the owner to current user for new table. 
+ tbl.setOwner(SessionState.getUserFromAuthenticator()); + + if (desc.getLocation() != null) { + tbl.setDataLocation(new Path(desc.getLocation())); + } else { + tbl.unsetDataLocation(); + } + + Class serdeClass; + try { + serdeClass = oldtbl.getDeserializerClass(); + } catch (Exception e) { + throw new HiveException(e); + } + // We should copy only those table parameters that are specified in the config. + SerDeSpec spec = AnnotationUtils.getAnnotation(serdeClass, SerDeSpec.class); + + Set retainer = new HashSet(); + // for non-native table, property storage_handler should be retained + retainer.add(META_TABLE_STORAGE); + if (spec != null && spec.schemaProps() != null) { + retainer.addAll(Arrays.asList(spec.schemaProps())); + } + + String paramsStr = HiveConf.getVar(context.getConf(), HiveConf.ConfVars.DDL_CTL_PARAMETERS_WHITELIST); + if (paramsStr != null) { + retainer.addAll(Arrays.asList(paramsStr.split(","))); + } + + Map params = tbl.getParameters(); + if (!retainer.isEmpty()) { + params.keySet().retainAll(retainer); + } else { + params.clear(); + } + + if (desc.getTblProps() != null) { + params.putAll(desc.getTblProps()); + } + + if (desc.isUserStorageFormat()) { + tbl.setInputFormatClass(desc.getDefaultInputFormat()); + tbl.setOutputFormatClass(desc.getDefaultOutputFormat()); + tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName()); + tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName()); + if (desc.getDefaultSerName() == null) { + LOG.info("Default to LazySimpleSerDe for like table {}", targetTableName); + tbl.setSerializationLib(LazySimpleSerDe.class.getName()); + } else { + // let's validate that the serde exists + validateSerDe(desc.getDefaultSerName()); + tbl.setSerializationLib(desc.getDefaultSerName()); + } + } + + tbl.getTTable().setTemporary(desc.isTemporary()); + tbl.getTTable().unsetId(); + + if (desc.isExternal()) { + tbl.setProperty("EXTERNAL", "TRUE"); + tbl.setTableType(TableType.EXTERNAL_TABLE); + // partition discovery is on by default + tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); + } else { + tbl.getParameters().remove("EXTERNAL"); + } + + return tbl; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java new file mode 100644 index 0000000..a0ddb85 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
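On the parameter copying in createTableLikeTable() above: only SerDe schema properties, the storage handler property, and anything named by the DDL_CTL_PARAMETERS_WHITELIST setting survive onto the new table; everything else is cleared. A self-contained sketch of that filtering step (the keys used in main() are just examples):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class CtltParameterFilter {
  // Keep only whitelisted parameters; with an empty whitelist the new table starts with none.
  static void retainWhitelisted(Map<String, String> params, Set<String> retainer) {
    if (retainer.isEmpty()) {
      params.clear();
    } else {
      params.keySet().retainAll(retainer);
    }
  }

  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    params.put("storage_handler", "x.y.SomeHandler");    // retained for non-native tables
    params.put("transient_lastDdlTime", "1561000000");   // dropped
    retainWhitelisted(params, new HashSet<>(Arrays.asList("storage_handler")));
    System.out.println(params);                          // {storage_handler=x.y.SomeHandler}
  }
}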
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +/** + * Operation process of creating a table. + */ +public class CreateTableOperation extends DDLOperation { + private final CreateTableDesc desc; + + public CreateTableOperation(DDLOperationContext context, CreateTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // create the table + Table tbl = desc.toTable(context.getConf()); + LOG.debug("creating table {} on {}", tbl.getFullyQualifiedName(), tbl.getDataLocation()); + + if (desc.getReplicationSpec().isInReplicationScope() && (!desc.getReplaceMode())){ + // if this is a replication spec, then replace-mode semantics might apply. + // if we're already asking for a table replacement, then we can skip this check. + // however, otherwise, if in replication scope, and we've not been explicitly asked + // to replace, we should check if the object we're looking at exists, and if so, + // trigger replace-mode semantics. + Table existingTable = context.getDb().getTable(tbl.getDbName(), tbl.getTableName(), false); + if (existingTable != null){ + if (desc.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())) { + desc.setReplaceMode(true); // we replace existing table. + ReplicationSpec.copyLastReplId(existingTable.getParameters(), tbl.getParameters()); + } else { + LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", desc.getTableName()); + return 0; // no replacement, the existing table state is newer than our update. + } + } + } + + // create the table + if (desc.getReplaceMode()) { + createTableReplaceMode(tbl); + } else { + createTableNonReplaceMode(tbl); + } + + addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); + return 0; + } + + private void createTableReplaceMode(Table tbl) throws HiveException { + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + long writeId = 0; + EnvironmentContext environmentContext = null; + if (replicationSpec != null && replicationSpec.isInReplicationScope()) { + if (replicationSpec.isMigratingToTxnTable()) { + // for migration we start the transaction and allocate write id in repl txn task for migration. 
+ String writeIdPara = context.getConf().get(ReplUtils.REPL_CURRENT_TBL_WRITE_ID); + if (writeIdPara == null) { + throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); + } + writeId = Long.parseLong(writeIdPara); + } else { + writeId = desc.getReplWriteId(); + } + + // In case of replication statistics is obtained from the source, so do not update those + // on replica. Since we are not replicating statisics for transactional tables, do not do + // so for transactional tables right now. + if (!AcidUtils.isTransactionalTable(desc)) { + environmentContext = new EnvironmentContext(); + environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); + } + } + + // replace-mode creates are really alters using CreateTableDesc. + context.getDb().alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false, environmentContext, + true, writeId); + } + + private void createTableNonReplaceMode(Table tbl) throws HiveException { + if (CollectionUtils.isNotEmpty(desc.getPrimaryKeys()) || + CollectionUtils.isNotEmpty(desc.getForeignKeys()) || + CollectionUtils.isNotEmpty(desc.getUniqueConstraints()) || + CollectionUtils.isNotEmpty(desc.getNotNullConstraints()) || + CollectionUtils.isNotEmpty(desc.getDefaultConstraints()) || + CollectionUtils.isNotEmpty(desc.getCheckConstraints())) { + context.getDb().createTable(tbl, desc.getIfNotExists(), desc.getPrimaryKeys(), desc.getForeignKeys(), + desc.getUniqueConstraints(), desc.getNotNullConstraints(), desc.getDefaultConstraints(), + desc.getCheckConstraints()); + } else { + context.getDb().createTable(tbl, desc.getIfNotExists()); + } + + if (desc.isCTAS()) { + Table createdTable = context.getDb().getTable(tbl.getDbName(), tbl.getTableName()); + DataContainer dc = new DataContainer(createdTable.getTTable()); + context.getQueryState().getLineageState().setLineage(createdTable.getPath(), dc, createdTable.getCols()); + } + } + + public static boolean doesTableNeedLocation(Table tbl) { + // TODO: If we are ok with breaking compatibility of existing 3rd party StorageHandlers, + // this method could be moved to the HiveStorageHandler interface. + boolean retval = true; + if (tbl.getStorageHandler() != null) { + // TODO: why doesn't this check class name rather than toString? + String sh = tbl.getStorageHandler().toString(); + retval = !"org.apache.hadoop.hive.hbase.HBaseStorageHandler".equals(sh) && + !Constants.DRUID_HIVE_STORAGE_HANDLER_ID.equals(sh) && + !Constants.JDBC_HIVE_STORAGE_HANDLER_ID.equals(sh) && + !"org.apache.hadoop.hive.accumulo.AccumuloStorageHandler".equals(sh); + } + return retval; + } + + public static void makeLocationQualified(Table table, HiveConf conf) throws HiveException { + StorageDescriptor sd = table.getTTable().getSd(); + // If the table's location is currently unset, it is left unset, allowing the metastore to + // fill in the table's location. + // Note that the previous logic for some reason would make a special case if the DB was the + // default database, and actually attempt to generate a location. + // This seems incorrect and uncessary, since the metastore is just as able to fill in the + // default table location in the case of the default DB, as it is for non-default DBs. 
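A standalone illustration of the rule spelled out in the comment above: an unset location is left alone so the metastore can fill it in, while a location that was set is made fully qualified. The default-FS handling below is a simplification standing in for Utilities.getQualifiedPath(), not the real helper:

public class LocationQualification {
  // Simplified stand-in for makeLocationQualified(): null stays null, everything else gets an authority.
  static String qualifyIfSet(String location, String defaultFs) {
    if (location == null) {
      return null;                          // metastore will pick the default table location
    }
    return location.contains("://") ? location : defaultFs + location;
  }

  public static void main(String[] args) {
    System.out.println(qualifyIfSet(null, "hdfs://nn:8020"));            // null
    System.out.println(qualifyIfSet("/warehouse/t", "hdfs://nn:8020"));  // hdfs://nn:8020/warehouse/t
  }
}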
+ Path path = null; + if (sd.isSetLocation()) { + path = new Path(sd.getLocation()); + } + if (path != null) { + sd.setLocation(Utilities.getQualifiedPath(conf, path)); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java new file mode 100644 index 0000000..0cfffd2 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableDesc.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + + +/** + * DDL task description for DESC table_name commands. + */ +@Explain(displayName = "Describe Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DescTableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(DescTableDesc.class, DescTableOperation.class); + } + + private final String resFile; + private final String tableName; + private final Map partSpec; + private final String colPath; + private final boolean isExt; + private final boolean isFormatted; + + public DescTableDesc(Path resFile, String tableName, Map partSpec, String colPath, boolean isExt, + boolean isFormatted) { + this.resFile = resFile.toString(); + this.tableName = tableName; + this.partSpec = partSpec; + this.colPath = colPath; + this.isExt = isExt; + this.isFormatted = isFormatted; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map getPartSpec() { + return partSpec; + } + + public String getColumnPath() { + return colPath; + } + + public boolean isExt() { + return isExt; + } + + public boolean isFormatted() { + return isFormatted; + } + + /** + * thrift ddl for the result of describe table. 
+ */ + private static final String SCHEMA = "col_name,data_type,comment#string:string:string"; + private static final String COL_STATS_SCHEMA = "col_name,data_type,min,max,num_nulls," + + "distinct_count,avg_col_len,max_col_len,num_trues,num_falses,bitVector,comment" + + "#string:string:string:string:string:string:string:string:string:string:string:string"; + + public static String getSchema(boolean colStats) { + if (colStats) { + return COL_STATS_SCHEMA; + } + return SCHEMA; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java new file mode 100644 index 0000000..fc0543f --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java @@ -0,0 +1,279 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.DataOutputStream; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.StatObjectConverter; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.CheckConstraint; +import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; +import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.PartitionIterable; +import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo; +import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo; +import org.apache.hadoop.hive.ql.metadata.Table; +import 
org.apache.hadoop.hive.ql.metadata.UniqueConstraint; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.AbstractSerDe; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.io.IOUtils; + +/** + * Operation process of dropping a table. + */ +public class DescTableOperation extends DDLOperation { + private final DescTableDesc desc; + + public DescTableOperation(DDLOperationContext context, DescTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws Exception { + String colPath = desc.getColumnPath(); + String tableName = desc.getTableName(); + + // describe the table - populate the output stream + Table tbl = context.getDb().getTable(tableName, false); + if (tbl == null) { + throw new HiveException(ErrorMsg.INVALID_TABLE, tableName); + } + Partition part = null; + if (desc.getPartSpec() != null) { + part = context.getDb().getPartition(tbl, desc.getPartSpec(), false); + if (part == null) { + throw new HiveException(ErrorMsg.INVALID_PARTITION, + StringUtils.join(desc.getPartSpec().keySet(), ','), tableName); + } + tbl = part.getTable(); + } + + DataOutputStream outStream = getOutputStream(new Path(desc.getResFile())); + try { + LOG.debug("DDLTask: got data for {}", tableName); + + List cols = null; + List colStats = null; + + Deserializer deserializer = tbl.getDeserializer(true); + if (deserializer instanceof AbstractSerDe) { + String errorMsgs = ((AbstractSerDe) deserializer).getConfigurationErrors(); + if (errorMsgs != null && !errorMsgs.isEmpty()) { + throw new SQLException(errorMsgs); + } + } + + if (colPath.equals(tableName)) { + cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ? + tbl.getCols() : part.getCols(); + + if (!desc.isFormatted()) { + cols.addAll(tbl.getPartCols()); + } + + if (tbl.isPartitioned() && part == null) { + // No partitioned specified for partitioned table, lets fetch all. + Map tblProps = tbl.getParameters() == null ? 
+ new HashMap() : tbl.getParameters(); + Map valueMap = new HashMap<>(); + Map stateMap = new HashMap<>(); + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + valueMap.put(stat, 0L); + stateMap.put(stat, true); + } + PartitionIterable parts = new PartitionIterable(context.getDb(), tbl, null, + context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + int numParts = 0; + for (Partition partition : parts) { + Map props = partition.getParameters(); + Boolean state = StatsSetupConst.areBasicStatsUptoDate(props); + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + stateMap.put(stat, stateMap.get(stat) && state); + if (props != null && props.get(stat) != null) { + valueMap.put(stat, valueMap.get(stat) + Long.parseLong(props.get(stat))); + } + } + numParts++; + } + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat))); + tblProps.put(stat, valueMap.get(stat).toString()); + } + tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts)); + tbl.setParameters(tblProps); + } + } else { + if (desc.isFormatted()) { + // when column name is specified in describe table DDL, colPath will + // will be table_name.column_name + String colName = colPath.split("\\.")[1]; + String[] dbTab = Utilities.getDbTableName(tableName); + List colNames = new ArrayList(); + colNames.add(colName.toLowerCase()); + if (null == part) { + if (tbl.isPartitioned()) { + Map tblProps = tbl.getParameters() == null ? + new HashMap() : tbl.getParameters(); + if (tbl.isPartitionKey(colNames.get(0))) { + FieldSchema partCol = tbl.getPartColByName(colNames.get(0)); + cols = Collections.singletonList(partCol); + PartitionIterable parts = new PartitionIterable(context.getDb(), tbl, null, + context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + ColumnInfo ci = new ColumnInfo(partCol.getName(), + TypeInfoUtils.getTypeInfoFromTypeString(partCol.getType()), null, false); + ColStatistics cs = StatsUtils.getColStatsForPartCol(ci, parts, context.getConf()); + ColumnStatisticsData data = new ColumnStatisticsData(); + ColStatistics.Range r = cs.getRange(); + StatObjectConverter.fillColumnStatisticsData(partCol.getType(), data, r == null ? null : r.minValue, + r == null ? null : r.maxValue, r == null ? null : r.minValue, r == null ? null : r.maxValue, + r == null ? null : r.minValue.toString(), r == null ? 
null : r.maxValue.toString(), + cs.getNumNulls(), cs.getCountDistint(), null, cs.getAvgColLen(), cs.getAvgColLen(), + cs.getNumTrues(), cs.getNumFalses()); + ColumnStatisticsObj cso = new ColumnStatisticsObj(partCol.getName(), partCol.getType(), data); + colStats = Collections.singletonList(cso); + StatsSetupConst.setColumnStatsState(tblProps, colNames); + } else { + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + List parts = context.getDb().getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), + (short) -1); + AggrStats aggrStats = context.getDb().getAggrColStatsFor( + dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false); + colStats = aggrStats.getColStats(); + if (parts.size() == aggrStats.getPartsFound()) { + StatsSetupConst.setColumnStatsState(tblProps, colNames); + } else { + StatsSetupConst.removeColumnStatsState(tblProps, colNames); + } + } + tbl.setParameters(tblProps); + } else { + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + colStats = context.getDb().getTableColumnStatistics( + dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false); + } + } else { + List partitions = new ArrayList(); + partitions.add(part.getName()); + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + colStats = context.getDb().getPartitionColumnStatistics(dbTab[0].toLowerCase(), + dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName()); + } + } else { + cols = Hive.getFieldsFromDeserializer(colPath, deserializer); + } + } + PrimaryKeyInfo pkInfo = null; + ForeignKeyInfo fkInfo = null; + UniqueConstraint ukInfo = null; + NotNullConstraint nnInfo = null; + DefaultConstraint dInfo = null; + CheckConstraint cInfo = null; + StorageHandlerInfo storageHandlerInfo = null; + if (desc.isExt() || desc.isFormatted()) { + pkInfo = context.getDb().getPrimaryKeys(tbl.getDbName(), tbl.getTableName()); + fkInfo = context.getDb().getForeignKeys(tbl.getDbName(), tbl.getTableName()); + ukInfo = context.getDb().getUniqueConstraints(tbl.getDbName(), tbl.getTableName()); + nnInfo = context.getDb().getNotNullConstraints(tbl.getDbName(), tbl.getTableName()); + dInfo = context.getDb().getDefaultConstraints(tbl.getDbName(), tbl.getTableName()); + cInfo = context.getDb().getCheckConstraints(tbl.getDbName(), tbl.getTableName()); + storageHandlerInfo = context.getDb().getStorageHandlerInfo(tbl); + } + fixDecimalColumnTypeName(cols); + // Information for materialized views + if (tbl.isMaterializedView()) { + final String validTxnsList = context.getDb().getConf().get(ValidTxnList.VALID_TXNS_KEY); + if (validTxnsList != null) { + List tablesUsed = new ArrayList<>(tbl.getCreationMetadata().getTablesUsed()); + ValidTxnWriteIdList currentTxnWriteIds = + SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList); + long defaultTimeWindow = HiveConf.getTimeVar(context.getDb().getConf(), + HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW, TimeUnit.MILLISECONDS); + tbl.setOutdatedForRewriting(Hive.isOutdatedMaterializedView(tbl, + currentTxnWriteIds, defaultTimeWindow, tablesUsed, false)); + } + } + // In case the query is served by HiveServer2, don't pad it with spaces, + // as HiveServer2 output is consumed by JDBC/ODBC clients. 
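Earlier in this execute() method, describing a partitioned table without a partition spec rolls the basic stats up across all partitions: each supported stat is summed, and the aggregate is only marked accurate if every partition was up to date. A standalone sketch of the summing part (partitions are reduced to their parameter maps here, and "numRows" is used as an example stat):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class BasicStatsRollup {
  static Map<String, Long> aggregate(List<Map<String, String>> partitionParams, List<String> stats) {
    Map<String, Long> totals = new HashMap<>();
    for (String stat : stats) {
      totals.put(stat, 0L);
    }
    for (Map<String, String> params : partitionParams) {
      for (String stat : stats) {
        String value = params.get(stat);
        if (value != null) {
          totals.merge(stat, Long.parseLong(value), Long::sum);
        }
      }
    }
    return totals;
  }

  public static void main(String[] args) {
    Map<String, String> p1 = new HashMap<>();
    p1.put("numRows", "100");
    Map<String, String> p2 = new HashMap<>();
    p2.put("numRows", "250");
    System.out.println(aggregate(Arrays.asList(p1, p2), Arrays.asList("numRows")));  // {numRows=350}
  }
}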
+ boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + context.getFormatter().describeTable(outStream, colPath, tableName, tbl, part, + cols, desc.isFormatted(), desc.isExt(), isOutputPadded, + colStats, pkInfo, fkInfo, ukInfo, nnInfo, dInfo, cInfo, + storageHandlerInfo); + + LOG.debug("DDLTask: written data for {}", tableName); + + } catch (SQLException e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName); + } finally { + IOUtils.closeStream(outStream); + } + + return 0; + } + + /** + * Fix the type name of a column of type decimal w/o precision/scale specified. This makes + * the describe table show "decimal(10,0)" instead of "decimal" even if the type stored + * in metastore is "decimal", which is possible with previous hive. + * + * @param cols columns that to be fixed as such + */ + private static void fixDecimalColumnTypeName(List cols) { + for (FieldSchema col : cols) { + if (serdeConstants.DECIMAL_TYPE_NAME.equals(col.getType())) { + col.setType(DecimalTypeInfo.getQualifiedName(HiveDecimal.USER_DEFAULT_PRECISION, + HiveDecimal.USER_DEFAULT_SCALE)); + } + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java new file mode 100644 index 0000000..f910c57 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableDesc.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; + +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for DROP TABLE commands. 
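The fixDecimalColumnTypeName() helper above only rewrites a bare "decimal" left behind by older Hive versions; already qualified types pass through untouched. A tiny equivalent for illustration (10 and 0 correspond to HiveDecimal's user default precision and scale):

public class DecimalTypeNameFixup {
  static String fixDecimalTypeName(String typeName) {
    return "decimal".equals(typeName) ? "decimal(10,0)" : typeName;
  }

  public static void main(String[] args) {
    System.out.println(fixDecimalTypeName("decimal"));       // decimal(10,0)
    System.out.println(fixDecimalTypeName("decimal(5,2)"));  // decimal(5,2), unchanged
  }
}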
+ */ +@Explain(displayName = "Drop Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropTableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(DropTableDesc.class, DropTableOperation.class); + } + + private final String tableName; + private final TableType expectedType; + private final boolean ifExists; + private final boolean ifPurge; + private final ReplicationSpec replicationSpec; + private final boolean validationRequired; + + public DropTableDesc(String tableName, TableType expectedType, boolean ifExists, boolean ifPurge, + ReplicationSpec replicationSpec) { + this(tableName, expectedType, ifExists, ifPurge, replicationSpec, true); + } + + public DropTableDesc(String tableName, TableType expectedType, boolean ifExists, boolean ifPurge, + ReplicationSpec replicationSpec, boolean validationRequired) { + this.tableName = tableName; + this.expectedType = expectedType; + this.ifExists = ifExists; + this.ifPurge = ifPurge; + this.replicationSpec = replicationSpec == null ? new ReplicationSpec() : replicationSpec; + this.validationRequired = validationRequired; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + public boolean getExpectView() { + return expectedType != null && expectedType == TableType.VIRTUAL_VIEW; + } + + public boolean getExpectMaterializedView() { + return expectedType != null && expectedType == TableType.MATERIALIZED_VIEW; + } + + public boolean getIfExists() { + return ifExists; + } + + public boolean getIfPurge() { + return ifPurge; + } + + /** + * @return what kind of replication scope this drop is running under. + * This can result in a "DROP IF OLDER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec(){ + return this.replicationSpec; + } + + public boolean getValidationRequired(){ + return this.validationRequired; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java new file mode 100644 index 0000000..1ce07b5 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DropTableOperation.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
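DropTableDesc carries the expected object type so that DropTableOperation (below) can reject a mismatched statement, for example DROP TABLE against a view, unless IF EXISTS turns it into a no-op. A rough sketch of that decision, with simplified types and messages:

public class DropKindCheck {
  enum Kind { TABLE, VIRTUAL_VIEW, MATERIALIZED_VIEW }

  static void validate(Kind actual, Kind expected, boolean ifExists) {
    if (actual == expected) {
      return;                                    // statement matches the object, proceed with the drop
    }
    if (ifExists) {
      return;                                    // IF EXISTS: silently do nothing
    }
    throw new IllegalStateException("Cannot drop a " + actual + " with DROP " + expected);
  }

  public static void main(String[] args) {
    validate(Kind.TABLE, Kind.TABLE, false);            // ok
    validate(Kind.VIRTUAL_VIEW, Kind.TABLE, true);      // ignored because of IF EXISTS
    try {
      validate(Kind.VIRTUAL_VIEW, Kind.TABLE, false);   // fails: DROP TABLE on a view
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());
    }
  }
}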
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; +import org.apache.hadoop.hive.ql.metadata.InvalidTableException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.PartitionIterable; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +import com.google.common.collect.Iterables; + +/** + * Operation process of dropping a table. + */ +public class DropTableOperation extends DDLOperation { + private final DropTableDesc desc; + + public DropTableOperation(DDLOperationContext context, DropTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Table tbl = null; + try { + tbl = context.getDb().getTable(desc.getTableName()); + } catch (InvalidTableException e) { + // drop table is idempotent + } + + // This is a true DROP TABLE + if (tbl != null && desc.getValidationRequired()) { + if (tbl.isView()) { + if (!desc.getExpectView()) { + if (desc.getIfExists()) { + return 0; + } + if (desc.getExpectMaterializedView()) { + throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW"); + } else { + throw new HiveException("Cannot drop a view with DROP TABLE"); + } + } + } else if (tbl.isMaterializedView()) { + if (!desc.getExpectMaterializedView()) { + if (desc.getIfExists()) { + return 0; + } + if (desc.getExpectView()) { + throw new HiveException("Cannot drop a materialized view with DROP VIEW"); + } else { + throw new HiveException("Cannot drop a materialized view with DROP TABLE"); + } + } + } else { + if (desc.getExpectView()) { + if (desc.getIfExists()) { + return 0; + } + throw new HiveException("Cannot drop a base table with DROP VIEW"); + } else if (desc.getExpectMaterializedView()) { + if (desc.getIfExists()) { + return 0; + } + throw new HiveException("Cannot drop a base table with DROP MATERIALIZED VIEW"); + } + } + } + + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + if (tbl != null && replicationSpec.isInReplicationScope()) { + /** + * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely + * matches a DROP TABLE IF OLDER THAN(x) semantic. + * + * Ideally, commands executed under the scope of replication need to be idempotent and resilient + * to repeats. What can happen, sometimes, is that a drone processing a replication task can + * have been abandoned for not returning in time, but still execute its task after a while, + * which should not result in it mucking up data that has been impressed later on. So, for eg., + * if we create partition P1, followed by droppping it, followed by creating it yet again, + * the replication of that drop should not drop the newer partition if it runs after the destination + * object is already in the newer state. 
+ * + * Thus, we check the replicationSpec.allowEventReplacementInto to determine whether or not we can + * drop the object in question(will return false if object is newer than the event, true if not) + * + * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP + * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must + * do one more thing - if it does not drop the table because the table is in a newer state, it must + * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL + * acts like a recursive DROP TABLE IF OLDER. + */ + if (!replicationSpec.allowEventReplacementInto(tbl.getParameters())) { + // Drop occured as part of replicating a drop, but the destination + // table was newer than the event being replicated. Ignore, but drop + // any partitions inside that are older. + if (tbl.isPartitioned()) { + PartitionIterable partitions = new PartitionIterable(context.getDb(), tbl, null, + context.getConf().getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); + for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())){ + context.getDb().dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true); + } + } + LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", desc.getTableName()); + return 0; // table is newer, leave it be. + } + } + + // drop the table + // TODO: API w/catalog name + context.getDb().dropTable(desc.getTableName(), desc.getIfPurge()); + if (tbl != null) { + // Remove from cache if it is a materialized view + if (tbl.isMaterializedView()) { + HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl); + } + // We have already locked the table in DDLSemanticAnalyzer, don't do it again here + addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java new file mode 100644 index 0000000..2a8b02e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableDesc.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + + +/** + * DDL task description for LOCK TABLE commands. 
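Back in DropTableOperation: the replication path above behaves like DROP TABLE IF OLDER THAN. Whether the drop may proceed hinges on comparing the incoming event with the replication state already recorded on the destination object, exactly as the comment describes (object newer than the event: skip; otherwise: drop). A simplified sketch of that comparison, with an illustrative parameter key:

import java.util.Map;

public class ReplDropCheck {
  static final String LAST_REPL_ID = "repl.last.id";  // illustrative key for the recorded replication state

  // True when the event may replace (here: drop) the destination object, i.e. the object is not newer.
  static boolean allowEventReplacementInto(Map<String, String> destParams, long eventId) {
    String lastReplId = destParams.get(LAST_REPL_ID);
    return lastReplId == null || Long.parseLong(lastReplId) <= eventId;
  }

  public static void main(String[] args) {
    System.out.println(allowEventReplacementInto(Map.of(LAST_REPL_ID, "90"), 100));   // true: drop proceeds
    System.out.println(allowEventReplacementInto(Map.of(LAST_REPL_ID, "110"), 100));  // false: destination is newer
  }
}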
+ */ +@Explain(displayName = "Lock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class LockTableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(LockTableDesc.class, LockTableOperation.class); + } + + private final String tableName; + private final String mode; + private final Map partSpec; + private final String queryId; + private final String queryStr; + + public LockTableDesc(String tableName, String mode, Map partSpec, String queryId, String queryStr) { + this.tableName = tableName; + this.mode = mode; + this.partSpec = partSpec; + this.queryId = queryId; + this.queryStr = queryStr; + } + + public String getTableName() { + return tableName; + } + + public String getMode() { + return mode; + } + + public Map getPartSpec() { + return partSpec; + } + + public String getQueryId() { + return queryId; + } + + public String getQueryStr() { + return queryStr; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java new file mode 100644 index 0000000..2044a81 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/LockTableOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of locking a table. + */ +public class LockTableOperation extends DDLOperation { + private final LockTableDesc desc; + + public LockTableOperation(DDLOperationContext context, LockTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Context ctx = context.getDriverContext().getCtx(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + return txnManager.lockTable(context.getDb(), desc); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java new file mode 100644 index 0000000..4bb609e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableDesc.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for PRE INSERT commands. + */ +@Explain(displayName = "Pre-Insert task", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class PreInsertTableDesc implements DDLDesc { + static { + DDLTask2.registerOperation(PreInsertTableDesc.class, PreInsertTableOperation.class); + } + + private final Table table; + private final boolean isOverwrite; + + public PreInsertTableDesc(Table table, boolean overwrite) { + this.table = table; + this.isOverwrite = overwrite; + } + + public Table getTable() { + return table; + } + + public boolean isOverwrite() { + return isOverwrite; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java new file mode 100644 index 0000000..5d85d0a --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/PreInsertTableOperation.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of pre inserting a table. 
+ */ +public class PreInsertTableOperation extends DDLOperation { + private final PreInsertTableDesc desc; + + public PreInsertTableOperation(DDLOperationContext context, PreInsertTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + try { + HiveMetaHook hook = desc.getTable().getStorageHandler().getMetaHook(); + if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { + return 0; + } + + DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; + hiveMetaHook.preInsertTable(desc.getTable().getTTable(), desc.isOverwrite()); + } catch (MetaException e) { + throw new HiveException(e); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java new file mode 100644 index 0000000..f221e86 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableDesc.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for SHOW CREATE TABLE commands. + */ +@Explain(displayName = "Show Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowCreateTableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + /** + * Thrift ddl for the result of showcreatetable. + */ + public static final String SCHEMA = "createtab_stmt#string"; + + private final String resFile; + private final String tableName; + + public ShowCreateTableDesc(String tableName, String resFile) { + this.tableName = tableName; + this.resFile = resFile; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java new file mode 100644 index 0000000..b624763 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowCreateTableOperation.java @@ -0,0 +1,281 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
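PreInsertTableOperation above only forwards the event when the storage handler's meta hook is a DefaultHiveMetaHook; a plain HiveMetaHook (or no hook at all) means there is nothing to do. A simplified stand-in for that dispatch, with made-up interface names:

public class PreInsertDispatch {
  interface MetaHook { }

  interface PreInsertAwareHook extends MetaHook {
    void preInsertTable(String tableName, boolean overwrite);
  }

  static void firePreInsert(MetaHook hook, String tableName, boolean overwrite) {
    if (hook instanceof PreInsertAwareHook) {
      ((PreInsertAwareHook) hook).preInsertTable(tableName, overwrite);
    }
    // any other hook (or none): no pre-insert work to do
  }

  public static void main(String[] args) {
    firePreInsert((PreInsertAwareHook) (table, overwrite) ->
        System.out.println("pre-insert on " + table + ", overwrite=" + overwrite), "demo_table", true);
    firePreInsert(new MetaHook() { }, "demo_table", false);  // silently ignored
  }
}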
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.Map.Entry; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.io.IOUtils; +import org.apache.hive.common.util.HiveStringUtils; +import org.stringtemplate.v4.ST; + +/** + * Operation process showing the creation of a table. 
+ */ +public class ShowCreateTableOperation extends DDLOperation { + private static final String EXTERNAL = "external"; + private static final String TEMPORARY = "temporary"; + private static final String LIST_COLUMNS = "columns"; + private static final String TBL_COMMENT = "tbl_comment"; + private static final String LIST_PARTITIONS = "partitions"; + private static final String SORT_BUCKET = "sort_bucket"; + private static final String SKEWED_INFO = "tbl_skewedinfo"; + private static final String ROW_FORMAT = "row_format"; + private static final String TBL_LOCATION = "tbl_location"; + private static final String TBL_PROPERTIES = "tbl_properties"; + + private final ShowCreateTableDesc desc; + + public ShowCreateTableOperation(DDLOperationContext context, ShowCreateTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // get the create table statement for the table and populate the output + DataOutputStream outStream = getOutputStream(new Path(desc.getResFile())); + try { + return showCreateTable(outStream); + } catch (Exception e) { + throw new HiveException(e); + } finally { + IOUtils.closeStream(outStream); + } + } + + private int showCreateTable(DataOutputStream outStream) throws HiveException { + boolean needsLocation = true; + StringBuilder createTabCommand = new StringBuilder(); + + Table tbl = context.getDb().getTable(desc.getTableName(), false); + List duplicateProps = new ArrayList(); + try { + needsLocation = CreateTableOperation.doesTableNeedLocation(tbl); + + if (tbl.isView()) { + String createTabStmt = "CREATE VIEW `" + desc.getTableName() + "` AS " + tbl.getViewExpandedText(); + outStream.write(createTabStmt.getBytes(StandardCharsets.UTF_8)); + return 0; + } + + createTabCommand.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `"); + createTabCommand.append(desc.getTableName() + "`(\n"); + createTabCommand.append("<" + LIST_COLUMNS + ">)\n"); + createTabCommand.append("<" + TBL_COMMENT + ">\n"); + createTabCommand.append("<" + LIST_PARTITIONS + ">\n"); + createTabCommand.append("<" + SORT_BUCKET + ">\n"); + createTabCommand.append("<" + SKEWED_INFO + ">\n"); + createTabCommand.append("<" + ROW_FORMAT + ">\n"); + if (needsLocation) { + createTabCommand.append("LOCATION\n"); + createTabCommand.append("<" + TBL_LOCATION + ">\n"); + } + createTabCommand.append("TBLPROPERTIES (\n"); + createTabCommand.append("<" + TBL_PROPERTIES + ">)\n"); + ST createTabStmt = new ST(createTabCommand.toString()); + + // For cases where the table is temporary + String tblTemp = ""; + if (tbl.isTemporary()) { + duplicateProps.add("TEMPORARY"); + tblTemp = "TEMPORARY "; + } + // For cases where the table is external + String tblExternal = ""; + if (tbl.getTableType() == TableType.EXTERNAL_TABLE) { + duplicateProps.add("EXTERNAL"); + tblExternal = "EXTERNAL "; + } + + // Columns + String tblColumns = ""; + List cols = tbl.getCols(); + List columns = new ArrayList(); + for (FieldSchema col : cols) { + String columnDesc = " `" + col.getName() + "` " + col.getType(); + if (col.getComment() != null) { + columnDesc = columnDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'"; + } + columns.add(columnDesc); + } + tblColumns = StringUtils.join(columns, ", \n"); + + // Table comment + String tblComment = ""; + String tabComment = tbl.getProperty("comment"); + if (tabComment != null) { + duplicateProps.add("comment"); + tblComment = "COMMENT '" + HiveStringUtils.escapeHiveCommand(tabComment) + "'"; + } + + 
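showCreateTable() assembles the statement from a skeleton with <placeholder> markers and renders it with StringTemplate, the same ST class imported above. A cut-down, runnable illustration of that approach with hard-coded values:

import org.stringtemplate.v4.ST;

public class CreateTableTemplateDemo {
  public static void main(String[] args) {
    // The skeleton mirrors the structure built in showCreateTable(); values here are examples only.
    ST stmt = new ST("CREATE <temporary><external>TABLE `<name>`(\n<columns>)\n<row_format>");
    stmt.add("temporary", "");
    stmt.add("external", "EXTERNAL ");
    stmt.add("name", "demo_db.demo_table");
    stmt.add("columns", "  `id` int, \n  `name` string");
    stmt.add("row_format", "STORED AS TEXTFILE");
    System.out.println(stmt.render());
  }
}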
// Partitions + String tblPartitions = ""; + List partKeys = tbl.getPartitionKeys(); + if (partKeys.size() > 0) { + tblPartitions += "PARTITIONED BY ( \n"; + List partCols = new ArrayList(); + for (FieldSchema partKey : partKeys) { + String partColDesc = " `" + partKey.getName() + "` " + partKey.getType(); + if (partKey.getComment() != null) { + partColDesc = partColDesc + " COMMENT '" + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'"; + } + partCols.add(partColDesc); + } + tblPartitions += StringUtils.join(partCols, ", \n"); + tblPartitions += ")"; + } + + // Clusters (Buckets) + String tblSortBucket = ""; + List buckCols = tbl.getBucketCols(); + if (buckCols.size() > 0) { + duplicateProps.add("SORTBUCKETCOLSPREFIX"); + tblSortBucket += "CLUSTERED BY ( \n "; + tblSortBucket += StringUtils.join(buckCols, ", \n "); + tblSortBucket += ") \n"; + List sortCols = tbl.getSortCols(); + if (sortCols.size() > 0) { + tblSortBucket += "SORTED BY ( \n"; + // Order + List sortKeys = new ArrayList(); + for (Order sortCol : sortCols) { + String sortKeyDesc = " " + sortCol.getCol() + " "; + if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) { + sortKeyDesc = sortKeyDesc + "ASC"; + } else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { + sortKeyDesc = sortKeyDesc + "DESC"; + } + sortKeys.add(sortKeyDesc); + } + tblSortBucket += StringUtils.join(sortKeys, ", \n"); + tblSortBucket += ") \n"; + } + tblSortBucket += "INTO " + tbl.getNumBuckets() + " BUCKETS"; + } + + // Skewed Info + StringBuilder tblSkewedInfo = new StringBuilder(); + SkewedInfo skewedInfo = tbl.getSkewedInfo(); + if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) { + tblSkewedInfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n"); + tblSkewedInfo.append(" ON ("); + List colValueList = new ArrayList(); + for (List colValues : skewedInfo.getSkewedColValues()) { + colValueList.add("('" + StringUtils.join(colValues, "','") + "')"); + } + tblSkewedInfo.append(StringUtils.join(colValueList, ",") + ")"); + if (tbl.isStoredAsSubDirectories()) { + tblSkewedInfo.append("\n STORED AS DIRECTORIES"); + } + } + + // Row format (SerDe) + StringBuilder tblRowFormat = new StringBuilder(); + StorageDescriptor sd = tbl.getTTable().getSd(); + SerDeInfo serdeInfo = sd.getSerdeInfo(); + Map serdeParams = serdeInfo.getParameters(); + tblRowFormat.append("ROW FORMAT SERDE \n"); + tblRowFormat.append(" '" + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n"); + if (tbl.getStorageHandler() == null) { + // If serialization.format property has the default value, it will not to be included in + // SERDE properties + if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(serdeConstants.SERIALIZATION_FORMAT))) { + serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT); + } + if (!serdeParams.isEmpty()) { + appendSerdeParams(tblRowFormat, serdeParams).append(" \n"); + } + tblRowFormat.append("STORED AS INPUTFORMAT \n '" + + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n"); + tblRowFormat.append("OUTPUTFORMAT \n '" + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'"); + } else { + duplicateProps.add(META_TABLE_STORAGE); + tblRowFormat.append("STORED BY \n '" + + HiveStringUtils.escapeHiveCommand(tbl.getParameters().get(META_TABLE_STORAGE)) + "' \n"); + // SerDe Properties + if (!serdeParams.isEmpty()) { + appendSerdeParams(tblRowFormat, serdeInfo.getParameters()); + } + } + String tblLocation = " '" 
+ HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'"; + + // Table properties + duplicateProps.addAll(StatsSetupConst.TABLE_PARAMS_STATS_KEYS); + String tblProperties = propertiesToString(tbl.getParameters(), duplicateProps); + + createTabStmt.add(TEMPORARY, tblTemp); + createTabStmt.add(EXTERNAL, tblExternal); + createTabStmt.add(LIST_COLUMNS, tblColumns); + createTabStmt.add(TBL_COMMENT, tblComment); + createTabStmt.add(LIST_PARTITIONS, tblPartitions); + createTabStmt.add(SORT_BUCKET, tblSortBucket); + createTabStmt.add(SKEWED_INFO, tblSkewedInfo); + createTabStmt.add(ROW_FORMAT, tblRowFormat); + // Table location should not be printed with hbase backed tables + if (needsLocation) { + createTabStmt.add(TBL_LOCATION, tblLocation); + } + createTabStmt.add(TBL_PROPERTIES, tblProperties); + + outStream.write(createTabStmt.render().getBytes(StandardCharsets.UTF_8)); + } catch (IOException e) { + LOG.info("show create table: ", e); + return 1; + } + + return 0; + } + + public static StringBuilder appendSerdeParams(StringBuilder builder, Map serdeParam) { + serdeParam = new TreeMap(serdeParam); + builder.append("WITH SERDEPROPERTIES ( \n"); + List serdeCols = new ArrayList(); + for (Entry entry : serdeParam.entrySet()) { + serdeCols.add(" '" + entry.getKey() + "'='" + HiveStringUtils.escapeHiveCommand(entry.getValue()) + "'"); + } + builder.append(StringUtils.join(serdeCols, ", \n")).append(')'); + return builder; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java new file mode 100644 index 0000000..72caa58 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesDesc.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for SHOW TABLE PROPERTIES commands. + */ +@Explain(displayName = "Show Table Properties", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowTablePropertiesDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(ShowTablePropertiesDesc.class, ShowTablePropertiesOperation.class); + } + + /** + * Thrift ddl for the result of showtblproperties. 
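+ * Each result row is a (property name, property value) pair of strings.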
+ */ + public static final String SCHEMA = "prpt_name,prpt_value#string:string"; + + private final String resFile; + private final String tableName; + private final String propertyName; + + public ShowTablePropertiesDesc(String resFile, String tableName, String propertyName) { + this.resFile = resFile; + this.tableName = tableName; + this.propertyName = propertyName; + } + + public String getResFile() { + return resFile; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFileString() { + return getResFile(); + } + + @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Explain(displayName = "property name") + public String getPropertyName() { + return propertyName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java new file mode 100644 index 0000000..a6ad013 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablePropertiesOperation.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.IOException; +import java.util.Map; +import java.util.TreeMap; +import java.util.Map.Entry; + +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; + +/** + * Operation process showing the table properties.
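+ * It writes either the single requested property or all properties of the given table to the result file.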
+ */ +public class ShowTablePropertiesOperation extends DDLOperation { + private final ShowTablePropertiesDesc desc; + + public ShowTablePropertiesOperation(DDLOperationContext context, ShowTablePropertiesDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + String tableName = desc.getTableName(); + + // show table properties - populate the output stream + Table tbl = context.getDb().getTable(tableName, false); + try { + if (tbl == null) { + String errMsg = "Table " + tableName + " does not exist"; + writeToFile(errMsg, desc.getResFile()); + return 0; + } + + LOG.info("DDLTask: show properties for {}", tableName); + + StringBuilder builder = new StringBuilder(); + String propertyName = desc.getPropertyName(); + if (propertyName != null) { + String propertyValue = tbl.getProperty(propertyName); + if (propertyValue == null) { + String errMsg = "Table " + tableName + " does not have property: " + propertyName; + builder.append(errMsg); + } else { + appendNonNull(builder, propertyName, true); + appendNonNull(builder, propertyValue); + } + } else { + Map properties = new TreeMap(tbl.getParameters()); + for (Entry entry : properties.entrySet()) { + appendNonNull(builder, entry.getKey(), true); + appendNonNull(builder, entry.getValue()); + } + } + + LOG.info("DDLTask: written data for showing properties of {}", tableName); + writeToFile(builder.toString(), desc.getResFile()); + } catch (IOException e) { + LOG.info("show table properties: ", e); + return 1; + } catch (Exception e) { + throw new HiveException(e); + } + + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java new file mode 100644 index 0000000..8c312a0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusDesc.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for SHOW TABLE STATUS commands. + */ +@Explain(displayName = "Show Table Status", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowTableStatusDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(ShowTableStatusDesc.class, ShowTableStatusOperation.class); + } + + /** + * Thrift ddl for the result of show tables. 
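+ * (SHOW TABLE STATUS declares the same single tab_name string column.)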
+ */ + public static final String SCHEMA = "tab_name#string"; + + private final String resFile; + private final String dbName; + private final String pattern; + private final Map partSpec; + + public ShowTableStatusDesc(String resFile, String dbName, String pattern) { + this(resFile, dbName, pattern, null); + } + + public ShowTableStatusDesc(String resFile, String dbName, String pattern, Map partSpec) { + this.resFile = resFile; + this.dbName = dbName; + this.pattern = pattern; + this.partSpec = partSpec; + } + + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + public String getResFile() { + return resFile; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFileString() { + return getResFile(); + } + + @Explain(displayName = "database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDbName() { + return dbName; + } + + @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map getPartSpec() { + return partSpec; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java new file mode 100644 index 0000000..88c471d --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTableStatusOperation.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.io.IOUtils; + +/** + * Operation process showing the table status. + */ +public class ShowTableStatusOperation extends DDLOperation { + private final ShowTableStatusDesc desc; + + public ShowTableStatusOperation(DDLOperationContext context, ShowTableStatusDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + // get the tables for the desired pattern - populate the output stream + List tbls = new ArrayList
(); + Map part = desc.getPartSpec(); + Partition par = null; + if (part != null) { + Table tbl = context.getDb().getTable(desc.getDbName(), desc.getPattern()); + par = context.getDb().getPartition(tbl, part, false); + if (par == null) { + throw new HiveException("Partition " + part + " for table " + desc.getPattern() + " does not exist."); + } + tbls.add(tbl); + } else { + LOG.debug("pattern: {}", desc.getPattern()); + List tblStr = context.getDb().getTablesForDb(desc.getDbName(), desc.getPattern()); + SortedSet sortedTbls = new TreeSet(tblStr); + Iterator iterTbls = sortedTbls.iterator(); + while (iterTbls.hasNext()) { + // create a row per table name + String tblName = iterTbls.next(); + Table tbl = context.getDb().getTable(desc.getDbName(), tblName); + tbls.add(tbl); + } + LOG.info("Found {} table(s) matching the SHOW TABLE EXTENDED statement.", tblStr.size()); + } + + // write the results in the file + DataOutputStream outStream = getOutputStream(new Path(desc.getResFile())); + try { + context.getFormatter().showTableStatus(outStream, context.getDb(), context.getConf(), tbls, part, par); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show table status"); + } finally { + IOUtils.closeStream(outStream); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java new file mode 100644 index 0000000..584433b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesDesc.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for SHOW TABLES commands. + */ +@Explain(displayName = "Show Tables", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowTablesDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(ShowTablesDesc.class, ShowTablesOperation.class); + } + + /** + * thrift ddl for the result of show tables and show views. + */ + private static final String TABLES_VIEWS_SCHEMA = "tab_name#string"; + + /** + * thrift ddl for the result of show extended tables. + */ + private static final String EXTENDED_TABLES_SCHEMA = "tab_name,table_type#string,string"; + + /** + * thrift ddl for the result of show tables. 
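+ * (for SHOW MATERIALIZED VIEWS it is extended with the rewrite-enabled flag and the refresh mode).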
+ */ + private static final String MATERIALIZED_VIEWS_SCHEMA = + "mv_name,rewrite_enabled,mode#string:string:string"; + + private final String resFile; + private final String dbName; + private final String pattern; + private final TableType type; + private final TableType typeFilter; + private final boolean isExtended; + + public ShowTablesDesc(Path resFile) { + this(resFile, null, null, null, null, false); + } + + public ShowTablesDesc(Path resFile, String dbName) { + this(resFile, dbName, null, null, null, false); + } + + public ShowTablesDesc(Path resFile, String dbName, TableType type) { + this(resFile, dbName, null, type, null, false); + } + + public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType typeFilter, boolean isExtended) { + this(resFile, dbName, pattern, null, typeFilter, isExtended); + } + + public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type) { + this(resFile, dbName, pattern, type, null, false); + } + + + public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type, TableType typeFilter, + boolean isExtended) { + this.resFile = resFile.toString(); + this.dbName = dbName; + this.pattern = pattern; + this.type = type; + this.typeFilter = typeFilter; + this.isExtended = isExtended; + } + + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + @Explain(displayName = "type") + public TableType getType() { + return type; + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDbName() { + return dbName; + } + + @Explain(displayName = "extended", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, + displayOnlyOnTrue = true) + public boolean isExtended() { + return isExtended; + } + + @Explain(displayName = "table type filter", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public TableType getTypeFilter() { + return typeFilter; + } + + public String getSchema() { + if (type != null && type == TableType.MATERIALIZED_VIEW) { + return MATERIALIZED_VIEWS_SCHEMA; + } + return isExtended ? EXTENDED_TABLES_SCHEMA : TABLES_VIEWS_SCHEMA; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java new file mode 100644 index 0000000..71b5717 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/ShowTablesOperation.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; + +import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.io.IOUtils; + +/** + * Operation process showing the tables. + */ +public class ShowTablesOperation extends DDLOperation { + private final ShowTablesDesc desc; + + public ShowTablesOperation(DDLOperationContext context, ShowTablesDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + TableType type = desc.getType(); // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs + String dbName = desc.getDbName(); + String pattern = desc.getPattern(); // if null, all tables/views are returned + TableType typeFilter = desc.getTypeFilter(); + String resultsFile = desc.getResFile(); + boolean isExtended = desc.isExtended(); + + if (!context.getDb().databaseExists(dbName)) { + throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); + } + + LOG.debug("pattern: {}", pattern); + LOG.debug("typeFilter: {}", typeFilter); + + List tableNames = null; + List
tableObjects = null; + if (type == null) { + if (isExtended) { + tableObjects = new ArrayList<>(); + tableObjects.addAll(context.getDb().getTableObjectsByType(dbName, pattern, typeFilter)); + LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size()); + } else { + tableNames = context.getDb().getTablesByType(dbName, pattern, typeFilter); + LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size()); + } + } else if (type == TableType.MATERIALIZED_VIEW) { + tableObjects = new ArrayList<>(); + tableObjects.addAll(context.getDb().getMaterializedViewObjectsByPattern(dbName, pattern)); + LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size()); + } else if (type == TableType.VIRTUAL_VIEW) { + tableNames = context.getDb().getTablesByType(dbName, pattern, type); + LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size()); + } else { + throw new HiveException("Option not recognized in SHOW TABLES/VIEWS/MATERIALIZED VIEWS"); + } + + // write the results in the file + DataOutputStream outStream = null; + try { + Path resFile = new Path(resultsFile); + FileSystem fs = resFile.getFileSystem(context.getConf()); + outStream = fs.create(resFile); + // Sort by name and print + if (tableNames != null) { + SortedSet<String> sortedSet = new TreeSet<>(tableNames); + context.getFormatter().showTables(outStream, sortedSet); + } else { + Collections.sort(tableObjects, Comparator.comparing(Table::getTableName)); + if (isExtended) { + context.getFormatter().showTablesExtended(outStream, tableObjects); + } else { + context.getFormatter().showMaterializedViews(outStream, tableObjects); + } + } + outStream.close(); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database " + dbName); + } finally { + IOUtils.closeStream(outStream); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java new file mode 100644 index 0000000..1f0cd82 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableDesc.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; +import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for TRUNCATE TABLE commands. + */ +@Explain(displayName = "Truncate Table or Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class TruncateTableDesc implements DDLDesc, Serializable, DDLDescWithWriteId { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(TruncateTableDesc.class, TruncateTableOperation.class); + } + + private final String tableName; + private final String fullTableName; + private final Map partSpec; + private final ReplicationSpec replicationSpec; + private final boolean isTransactional; + + private List columnIndexes; + private Path inputDir; + private Path outputDir; + private ListBucketingCtx lbCtx; + + private long writeId = 0; + + public TruncateTableDesc(String tableName, Map partSpec, ReplicationSpec replicationSpec) { + this(tableName, partSpec, replicationSpec, null); + } + + public TruncateTableDesc(String tableName, Map partSpec, ReplicationSpec replicationSpec, + Table table) { + this.tableName = tableName; + this.fullTableName = table == null ? tableName : TableName.getDbTable(table.getDbName(), table.getTableName()); + this.partSpec = partSpec; + this.replicationSpec = replicationSpec; + this.isTransactional = AcidUtils.isTransactionalTable(table); + } + + @Explain(displayName = "TableName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + @Override + public String getFullTableName() { + return fullTableName; + } + + @Explain(displayName = "Partition Spec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Map getPartSpec() { + return partSpec; + } + + /** + * @return what kind of replication scope this truncate is running under. 
+ * This can result in a "TRUNCATE IF NEWER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec() { + return replicationSpec; + } + + @Explain(displayName = "Column Indexes") + public List getColumnIndexes() { + return columnIndexes; + } + + public void setColumnIndexes(List columnIndexes) { + this.columnIndexes = columnIndexes; + } + + public Path getInputDir() { + return inputDir; + } + + public void setInputDir(Path inputDir) { + this.inputDir = inputDir; + } + + public Path getOutputDir() { + return outputDir; + } + + public void setOutputDir(Path outputDir) { + this.outputDir = outputDir; + } + + public ListBucketingCtx getLbCtx() { + return lbCtx; + } + + public void setLbCtx(ListBucketingCtx lbCtx) { + this.lbCtx = lbCtx; + } + + @Override + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + @Override + public boolean mayNeedWriteId() { + return isTransactional; + } + + public long getWriteId() { + return writeId; + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " for " + getFullTableName(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java new file mode 100644 index 0000000..344257e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/TruncateTableOperation.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Map; + +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask; +import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; + +/** + * Operation process of truncating a table. 
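+ * Column truncation of RCFile data is delegated to a ColumnTruncateTask, while a plain truncate goes directly to Hive.truncateTable.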
+ */ +public class TruncateTableOperation extends DDLOperation { + private final TruncateTableDesc desc; + + public TruncateTableOperation(DDLOperationContext context, TruncateTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + if (desc.getColumnIndexes() != null) { + ColumnTruncateWork truncateWork = new ColumnTruncateWork(desc.getColumnIndexes(), desc.getInputDir(), + desc.getOutputDir()); + truncateWork.setListBucketingCtx(desc.getLbCtx()); + truncateWork.setMapperCannotSpanPartns(true); + DriverContext driverCxt = new DriverContext(); + ColumnTruncateTask taskExec = new ColumnTruncateTask(); + taskExec.initialize(context.getQueryState(), null, driverCxt, null); + taskExec.setWork(truncateWork); + taskExec.setQueryPlan(context.getQueryPlan()); + Task subtask = taskExec; + int ret = taskExec.execute(driverCxt); + if (subtask.getException() != null) { + context.getTask().setException(subtask.getException()); + } + return ret; + } + + String tableName = desc.getTableName(); + Map partSpec = desc.getPartSpec(); + + ReplicationSpec replicationSpec = desc.getReplicationSpec(); + if (!allowOperationInReplicationScope(context.getDb(), tableName, partSpec, replicationSpec)) { + // no truncate, the table is missing either due to drop/rename which follows the truncate. + // or the existing table is newer than our update. + LOG.debug("DDLTask: Truncate Table/Partition is skipped as table {} / partition {} is newer than update", + tableName, (partSpec == null) ? + "null" : FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values()))); + return 0; + } + + try { + context.getDb().truncateTable(tableName, partSpec, + replicationSpec != null && replicationSpec.isInReplicationScope() ? desc.getWriteId() : 0L); + } catch (Exception e) { + throw new HiveException(e, ErrorMsg.GENERIC_ERROR); + } + return 0; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java new file mode 100644 index 0000000..8605024 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableDesc.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DDL task description for UNLOCK TABLE commands. 
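+ * The matching operation simply hands the request over to the transaction manager of the current query context.
+ * A rough usage sketch (illustrative only, partition spec omitted):
+ * <pre>
+ *   UnlockTableDesc desc = new UnlockTableDesc("default.src", null);
+ *   // DDLTask2 resolves UnlockTableOperation through the static registration below and invokes execute()
+ * </pre>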
+ */ +@Explain(displayName = "Unlock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class UnlockTableDesc implements DDLDesc, Serializable { + private static final long serialVersionUID = 1L; + + static { + DDLTask2.registerOperation(UnlockTableDesc.class, UnlockTableOperation.class); + } + + private final String tableName; + private final Map partSpec; + + public UnlockTableDesc(String tableName, Map partSpec) { + this.tableName = tableName; + this.partSpec = partSpec; + } + + public String getTableName() { + return tableName; + } + + public Map getPartSpec() { + return partSpec; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java new file mode 100644 index 0000000..8b70e06 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/UnlockTableOperation.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.ddl.table; + +import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ddl.DDLOperation; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * Operation process of unlocking a table. + */ +public class UnlockTableOperation extends DDLOperation { + private final UnlockTableDesc desc; + + public UnlockTableOperation(DDLOperationContext context, UnlockTableDesc desc) { + super(context); + this.desc = desc; + } + + @Override + public int execute() throws HiveException { + Context ctx = context.getDriverContext().getCtx(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + return txnManager.unlockTable(context.getDb(), desc); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java new file mode 100644 index 0000000..6fc4730 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/table/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Table related DDL operation descriptions and operations. */ +package org.apache.hadoop.hive.ql.ddl.table; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index a56695b..f4281bd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.exec; import static org.apache.commons.lang.StringUtils.join; -import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; import java.io.DataOutputStream; import java.io.FileNotFoundException; @@ -28,25 +27,18 @@ import java.io.Serializable; import java.net.URI; import java.net.URISyntaxException; -import java.nio.charset.StandardCharsets; import java.sql.SQLException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.SortedSet; -import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -62,10 +54,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; -import org.apache.hadoop.hive.common.type.HiveDecimal; -import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; @@ -74,13 +63,8 @@ import org.apache.hadoop.hive.metastore.Msck; import org.apache.hadoop.hive.metastore.MsckInfo; import org.apache.hadoop.hive.metastore.PartitionDropOptions; -import org.apache.hadoop.hive.metastore.PartitionManagementTask; -import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; @@ -93,14 +77,6 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import 
org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; @@ -138,8 +114,6 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcSerde; import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe; -import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask; -import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork; import org.apache.hadoop.hive.ql.lockmgr.DbLockManager; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager; @@ -147,30 +121,20 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; -import org.apache.hadoop.hive.ql.metadata.CheckConstraint; -import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; -import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; -import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.PartitionIterable; -import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo; -import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.metadata.UniqueConstraint; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter; import org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataTable; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.CalcitePlanner; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.parse.ParseUtils; -import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; @@ -185,20 +149,16 @@ import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc; -import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; import 
org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; @@ -209,7 +169,6 @@ import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; @@ -225,20 +184,13 @@ import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; -import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.TezWork; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils; @@ -253,12 +205,9 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveV1Authorizer; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.AbstractSerDe; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; -import org.apache.hadoop.hive.serde2.SerDeSpec; import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe; @@ -267,7 +216,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; @@ -276,12 +224,10 @@ import org.apache.hadoop.tools.HadoopArchives; import org.apache.hadoop.util.ToolRunner; import 
org.apache.hive.common.util.AnnotationUtils; -import org.apache.hive.common.util.HiveStringUtils; import org.apache.hive.common.util.ReflectionUtil; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.stringtemplate.v4.ST; /** * DDLTask implementation. @@ -302,11 +248,6 @@ private MetaDataFormatter formatter; private final HiveAuthorizationTranslator defaultAuthorizationTranslator = new DefaultHiveAuthorizationTranslator(); - private Task subtask = null; - - public Task getSubtask() { - return subtask; - } @Override public boolean requireLock() { @@ -344,19 +285,9 @@ public int execute(DriverContext driverContext) { try { db = Hive.get(conf); - CreateTableDesc crtTbl = work.getCreateTblDesc(); - if (crtTbl != null) { - return createTable(db, crtTbl); - } - - CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc(); - if (crtTblLike != null) { - return createTableLike(db, crtTblLike); - } - - DropTableDesc dropTbl = work.getDropTblDesc(); - if (dropTbl != null) { - dropTableOrPartitions(db, dropTbl); + DropPartitionDesc dropPartition = work.getDropPartitionDesc(); + if (dropPartition != null) { + dropPartitions(db, dropPartition); return 0; } @@ -410,36 +341,16 @@ public int execute(DriverContext driverContext) { return msck(db, msckDesc); } - DescTableDesc descTbl = work.getDescTblDesc(); - if (descTbl != null) { - return describeTable(db, descTbl); - } - DescFunctionDesc descFunc = work.getDescFunctionDesc(); if (descFunc != null) { return describeFunction(db, descFunc); } - ShowTablesDesc showTbls = work.getShowTblsDesc(); - if (showTbls != null) { - return showTablesOrViews(db, showTbls); - } - ShowColumnsDesc showCols = work.getShowColumnsDesc(); if (showCols != null) { return showColumns(db, showCols); } - ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc(); - if (showTblStatus != null) { - return showTableStatus(db, showTblStatus); - } - - ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc(); - if (showTblProperties != null) { - return showTableProperties(db, showTblProperties); - } - ShowFunctionsDesc showFuncs = work.getShowFuncsDesc(); if (showFuncs != null) { return showFunctions(db, showFuncs); @@ -465,31 +376,11 @@ public int execute(DriverContext driverContext) { return abortTxns(db, abortTxnsDesc); } - LockTableDesc lockTbl = work.getLockTblDesc(); - if (lockTbl != null) { - return lockTable(db, lockTbl); - } - - UnlockTableDesc unlockTbl = work.getUnlockTblDesc(); - if (unlockTbl != null) { - return unlockTable(db, unlockTbl); - } - ShowPartitionsDesc showParts = work.getShowPartsDesc(); if (showParts != null) { return showPartitions(db, showParts); } - ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc(); - if (showCreateDb != null) { - return showCreateDatabase(db, showCreateDb); - } - - ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc(); - if (showCreateTbl != null) { - return showCreateTable(db, showCreateTbl); - } - ShowConfDesc showConf = work.getShowConfDesc(); if (showConf != null) { return showConf(db, showConf); @@ -534,11 +425,6 @@ public int execute(DriverContext driverContext) { return alterTableAlterPart(db, alterPartDesc); } - TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc(); - if (truncateTableDesc != null) { - return truncateTable(db, truncateTableDesc); - } - AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition(); if (alterTableExchangePartition != null) { @@ -553,10 +439,6 @@ public int 
execute(DriverContext driverContext) { if (insertCommitHookDesc != null) { return insertCommitWork(db, insertCommitHookDesc); } - PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc(); - if (preInsertTableDesc != null) { - return preInsertWork(db, preInsertTableDesc); - } KillQueryDesc killQueryDesc = work.getKillQueryDesc(); if (killQueryDesc != null) { @@ -802,20 +684,6 @@ private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolM return 0; } - private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException { - try{ - HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook(); - if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { - return 0; - } - DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; - hiveMetaHook.preInsertTable(preInsertTableDesc.getTable().getTTable(), preInsertTableDesc.isOverwrite()); - } catch (MetaException e) { - throw new HiveException(e); - } - return 0; - } - private int insertCommitWork(Hive db, InsertCommitHookDesc insertCommitHookDesc) throws MetaException { boolean failed = true; HiveMetaHook hook = insertCommitHookDesc.getTable().getStorageHandler().getMetaHook(); @@ -964,7 +832,7 @@ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, // initialize the task and execute task.initialize(queryState, getQueryPlan(), driverCxt, opContext); - subtask = task; + Task subtask = task; int ret = task.execute(driverCxt); if (subtask.getException() != null) { setException(subtask.getException()); @@ -2109,373 +1977,6 @@ private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveExc return 0; } - private int showCreateDatabase(Hive db, ShowCreateDatabaseDesc showCreateDb) throws HiveException { - DataOutputStream outStream = getOutputStream(showCreateDb.getResFile()); - try { - String dbName = showCreateDb.getDatabaseName(); - return showCreateDatabase(db, outStream, dbName); - } catch (Exception e) { - throw new HiveException(e); - } finally { - IOUtils.closeStream(outStream); - } - } - - private int showCreateDatabase(Hive db, DataOutputStream outStream, String databaseName) - throws Exception { - Database database = db.getDatabase(databaseName); - - StringBuilder createDb_str = new StringBuilder(); - createDb_str.append("CREATE DATABASE `").append(database.getName()).append("`\n"); - if (database.getDescription() != null) { - createDb_str.append("COMMENT\n '"); - createDb_str.append( - HiveStringUtils.escapeHiveCommand(database.getDescription())).append("'\n"); - } - createDb_str.append("LOCATION\n '"); - createDb_str.append(database.getLocationUri()).append("'\n"); - String propertiesToString = propertiesToString(database.getParameters(), null); - if (!propertiesToString.isEmpty()) { - createDb_str.append("WITH DBPROPERTIES (\n"); - createDb_str.append(propertiesToString).append(")\n"); - } - - outStream.write(createDb_str.toString().getBytes("UTF-8")); - return 0; - } - - /** - * Write a statement of how to create a table to a file. - * - * @param db - * The database in question. - * @param showCreateTbl - * This is the table we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. 
- */ - private int showCreateTable(Hive db, ShowCreateTableDesc showCreateTbl) throws HiveException { - // get the create table statement for the table and populate the output - DataOutputStream outStream = getOutputStream(showCreateTbl.getResFile()); - try { - String tableName = showCreateTbl.getTableName(); - return showCreateTable(db, outStream, tableName); - } catch (Exception e) { - throw new HiveException(e); - } finally { - IOUtils.closeStream(outStream); - } - } - - private int showCreateTable(Hive db, DataOutputStream outStream, String tableName) - throws HiveException { - final String EXTERNAL = "external"; - final String TEMPORARY = "temporary"; - final String LIST_COLUMNS = "columns"; - final String TBL_COMMENT = "tbl_comment"; - final String LIST_PARTITIONS = "partitions"; - final String SORT_BUCKET = "sort_bucket"; - final String SKEWED_INFO = "tbl_skewedinfo"; - final String ROW_FORMAT = "row_format"; - final String TBL_LOCATION = "tbl_location"; - final String TBL_PROPERTIES = "tbl_properties"; - boolean needsLocation = true; - StringBuilder createTab_str = new StringBuilder(); - - Table tbl = db.getTable(tableName, false); - List duplicateProps = new ArrayList(); - try { - needsLocation = doesTableNeedLocation(tbl); - - if (tbl.isView()) { - String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + - tbl.getViewExpandedText(); - outStream.write(createTab_stmt.getBytes(StandardCharsets.UTF_8)); - return 0; - } - - createTab_str.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `"); - createTab_str.append(tableName + "`(\n"); - createTab_str.append("<" + LIST_COLUMNS + ">)\n"); - createTab_str.append("<" + TBL_COMMENT + ">\n"); - createTab_str.append("<" + LIST_PARTITIONS + ">\n"); - createTab_str.append("<" + SORT_BUCKET + ">\n"); - createTab_str.append("<" + SKEWED_INFO + ">\n"); - createTab_str.append("<" + ROW_FORMAT + ">\n"); - if (needsLocation) { - createTab_str.append("LOCATION\n"); - createTab_str.append("<" + TBL_LOCATION + ">\n"); - } - createTab_str.append("TBLPROPERTIES (\n"); - createTab_str.append("<" + TBL_PROPERTIES + ">)\n"); - ST createTab_stmt = new ST(createTab_str.toString()); - - // For cases where the table is temporary - String tbl_temp = ""; - if (tbl.isTemporary()) { - duplicateProps.add("TEMPORARY"); - tbl_temp = "TEMPORARY "; - } - // For cases where the table is external - String tbl_external = ""; - if (tbl.getTableType() == TableType.EXTERNAL_TABLE) { - duplicateProps.add("EXTERNAL"); - tbl_external = "EXTERNAL "; - } - - // Columns - String tbl_columns = ""; - List cols = tbl.getCols(); - List columns = new ArrayList(); - for (FieldSchema col : cols) { - String columnDesc = " `" + col.getName() + "` " + col.getType(); - if (col.getComment() != null) { - columnDesc = columnDesc + " COMMENT '" - + HiveStringUtils.escapeHiveCommand(col.getComment()) + "'"; - } - columns.add(columnDesc); - } - tbl_columns = StringUtils.join(columns, ", \n"); - - // Table comment - String tbl_comment = ""; - String tabComment = tbl.getProperty("comment"); - if (tabComment != null) { - duplicateProps.add("comment"); - tbl_comment = "COMMENT '" - + HiveStringUtils.escapeHiveCommand(tabComment) + "'"; - } - - // Partitions - String tbl_partitions = ""; - List partKeys = tbl.getPartitionKeys(); - if (partKeys.size() > 0) { - tbl_partitions += "PARTITIONED BY ( \n"; - List partCols = new ArrayList(); - for (FieldSchema partKey : partKeys) { - String partColDesc = " `" + partKey.getName() + "` " + partKey.getType(); - if (partKey.getComment() != null) { - 
partColDesc = partColDesc + " COMMENT '" - + HiveStringUtils.escapeHiveCommand(partKey.getComment()) + "'"; - } - partCols.add(partColDesc); - } - tbl_partitions += StringUtils.join(partCols, ", \n"); - tbl_partitions += ")"; - } - - // Clusters (Buckets) - String tbl_sort_bucket = ""; - List buckCols = tbl.getBucketCols(); - if (buckCols.size() > 0) { - duplicateProps.add("SORTBUCKETCOLSPREFIX"); - tbl_sort_bucket += "CLUSTERED BY ( \n "; - tbl_sort_bucket += StringUtils.join(buckCols, ", \n "); - tbl_sort_bucket += ") \n"; - List sortCols = tbl.getSortCols(); - if (sortCols.size() > 0) { - tbl_sort_bucket += "SORTED BY ( \n"; - // Order - List sortKeys = new ArrayList(); - for (Order sortCol : sortCols) { - String sortKeyDesc = " " + sortCol.getCol() + " "; - if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) { - sortKeyDesc = sortKeyDesc + "ASC"; - } - else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { - sortKeyDesc = sortKeyDesc + "DESC"; - } - sortKeys.add(sortKeyDesc); - } - tbl_sort_bucket += StringUtils.join(sortKeys, ", \n"); - tbl_sort_bucket += ") \n"; - } - tbl_sort_bucket += "INTO " + tbl.getNumBuckets() + " BUCKETS"; - } - - // Skewed Info - StringBuilder tbl_skewedinfo = new StringBuilder(); - SkewedInfo skewedInfo = tbl.getSkewedInfo(); - if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) { - tbl_skewedinfo.append("SKEWED BY (" + StringUtils.join(skewedInfo.getSkewedColNames(), ",") + ")\n"); - tbl_skewedinfo.append(" ON ("); - List colValueList = new ArrayList(); - for (List colValues : skewedInfo.getSkewedColValues()) { - colValueList.add("('" + StringUtils.join(colValues, "','") + "')"); - } - tbl_skewedinfo.append(StringUtils.join(colValueList, ",") + ")"); - if (tbl.isStoredAsSubDirectories()) { - tbl_skewedinfo.append("\n STORED AS DIRECTORIES"); - } - } - - // Row format (SerDe) - StringBuilder tbl_row_format = new StringBuilder(); - StorageDescriptor sd = tbl.getTTable().getSd(); - SerDeInfo serdeInfo = sd.getSerdeInfo(); - Map serdeParams = serdeInfo.getParameters(); - tbl_row_format.append("ROW FORMAT SERDE \n"); - tbl_row_format.append(" '" - + HiveStringUtils.escapeHiveCommand(serdeInfo.getSerializationLib()) + "' \n"); - if (tbl.getStorageHandler() == null) { - // If serialization.format property has the default value, it will not to be included in - // SERDE properties - if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get( - serdeConstants.SERIALIZATION_FORMAT))){ - serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT); - } - if (!serdeParams.isEmpty()) { - appendSerdeParams(tbl_row_format, serdeParams).append(" \n"); - } - tbl_row_format.append("STORED AS INPUTFORMAT \n '" - + HiveStringUtils.escapeHiveCommand(sd.getInputFormat()) + "' \n"); - tbl_row_format.append("OUTPUTFORMAT \n '" - + HiveStringUtils.escapeHiveCommand(sd.getOutputFormat()) + "'"); - } else { - duplicateProps.add(META_TABLE_STORAGE); - tbl_row_format.append("STORED BY \n '" - + HiveStringUtils.escapeHiveCommand(tbl.getParameters().get( - META_TABLE_STORAGE)) + "' \n"); - // SerDe Properties - if (!serdeParams.isEmpty()) { - appendSerdeParams(tbl_row_format, serdeInfo.getParameters()); - } - } - String tbl_location = " '" + HiveStringUtils.escapeHiveCommand(sd.getLocation()) + "'"; - - // Table properties - duplicateProps.addAll(StatsSetupConst.TABLE_PARAMS_STATS_KEYS); - String tbl_properties = propertiesToString(tbl.getParameters(), duplicateProps); - - createTab_stmt.add(TEMPORARY, tbl_temp); - 
createTab_stmt.add(EXTERNAL, tbl_external); - createTab_stmt.add(LIST_COLUMNS, tbl_columns); - createTab_stmt.add(TBL_COMMENT, tbl_comment); - createTab_stmt.add(LIST_PARTITIONS, tbl_partitions); - createTab_stmt.add(SORT_BUCKET, tbl_sort_bucket); - createTab_stmt.add(SKEWED_INFO, tbl_skewedinfo); - createTab_stmt.add(ROW_FORMAT, tbl_row_format); - // Table location should not be printed with hbase backed tables - if (needsLocation) { - createTab_stmt.add(TBL_LOCATION, tbl_location); - } - createTab_stmt.add(TBL_PROPERTIES, tbl_properties); - - outStream.write(createTab_stmt.render().getBytes(StandardCharsets.UTF_8)); - } catch (IOException e) { - LOG.info("show create table: ", e); - return 1; - } - - return 0; - } - - private String propertiesToString(Map props, List exclude) { - String prop_string = ""; - if (!props.isEmpty()) { - Map properties = new TreeMap(props); - List realProps = new ArrayList(); - for (String key : properties.keySet()) { - if (properties.get(key) != null && (exclude == null || !exclude.contains(key))) { - realProps.add(" '" + key + "'='" + - HiveStringUtils.escapeHiveCommand(properties.get(key)) + "'"); - } - } - prop_string += StringUtils.join(realProps, ", \n"); - } - return prop_string; - } - - public static StringBuilder appendSerdeParams( - StringBuilder builder, Map serdeParam) { - serdeParam = new TreeMap(serdeParam); - builder.append("WITH SERDEPROPERTIES ( \n"); - List serdeCols = new ArrayList(); - for (Entry entry : serdeParam.entrySet()) { - serdeCols.add(" '" + entry.getKey() + "'='" - + HiveStringUtils.escapeHiveCommand(entry.getValue()) + "'"); - } - builder.append(StringUtils.join(serdeCols, ", \n")).append(')'); - return builder; - } - - /** - * Write a list of the tables/views in the database to a file. - * - * @param db - * The database in context. - * @param showDesc - * A ShowTablesDesc for tables or views we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int showTablesOrViews(Hive db, ShowTablesDesc showDesc) throws HiveException { - // get the tables/views for the desired pattern - populate the output stream - List tableNames = null; - List
tableObjects = null; - - TableType type = showDesc.getType(); // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs - String dbName = showDesc.getDbName(); - String pattern = showDesc.getPattern(); // if null, all tables/views are returned - TableType typeFilter = showDesc.getTypeFilter(); - String resultsFile = showDesc.getResFile(); - boolean isExtended = showDesc.isExtended(); - - if (!db.databaseExists(dbName)) { - throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName); - } - - LOG.debug("pattern: {}", pattern); - LOG.debug("typeFilter: {}", typeFilter); - if (type == null) { - if (isExtended) { - tableObjects = new ArrayList<>(); - tableObjects.addAll(db.getTableObjectsByType(dbName, pattern, typeFilter)); - LOG.debug("Found {} table(s) matching the SHOW EXTENDED TABLES statement.", tableObjects.size()); - } else { - tableNames = db.getTablesByType(dbName, pattern, typeFilter); - LOG.debug("Found {} table(s) matching the SHOW TABLES statement.", tableNames.size()); - } - } else if (type == TableType.MATERIALIZED_VIEW) { - tableObjects = new ArrayList<>(); - tableObjects.addAll(db.getMaterializedViewObjectsByPattern(dbName, pattern)); - LOG.debug("Found {} materialized view(s) matching the SHOW MATERIALIZED VIEWS statement.", tableObjects.size()); - } else if (type == TableType.VIRTUAL_VIEW) { - tableNames = db.getTablesByType(dbName, pattern, type); - LOG.debug("Found {} view(s) matching the SHOW VIEWS statement.", tableNames.size()); - } else { - throw new HiveException("Option not recognized in SHOW TABLES/VIEWS/MATERIALIZED VIEWS"); - } - - // write the results in the file - DataOutputStream outStream = null; - try { - Path resFile = new Path(resultsFile); - FileSystem fs = resFile.getFileSystem(conf); - outStream = fs.create(resFile); - // Sort by name and print - if (tableNames != null) { - SortedSet sortedSet = new TreeSet(tableNames); - formatter.showTables(outStream, sortedSet); - } else { - Collections.sort(tableObjects, Comparator.comparing(Table::getTableName)); - if (isExtended) { - formatter.showTablesExtended(outStream, tableObjects); - } else { - formatter.showMaterializedViews(outStream, tableObjects); - } - } - outStream.close(); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database" + dbName); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - /** * Write a list of the columns in the table to a file. * @@ -2949,38 +2450,6 @@ private int killQuery(Hive db, KillQueryDesc desc) throws HiveException { return 0; } - /** - * Lock the table/partition specified - * @param db - * - * @param lockTbl - * the table/partition to be locked along with the mode - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException { - Context ctx = driverContext.getCtx(); - HiveTxnManager txnManager = ctx.getHiveTxnManager(); - return txnManager.lockTable(db, lockTbl); - } - - /** - * Unlock the table/partition specified - * @param db - * - * @param unlockTbl - * the table/partition to be unlocked - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. 
- */ - private int unlockTable(Hive db, UnlockTableDesc unlockTbl) throws HiveException { - Context ctx = driverContext.getCtx(); - HiveTxnManager txnManager = ctx.getHiveTxnManager(); - return txnManager.unlockTable(db, unlockTbl); - } - /** * Shows a description of a function. * @param db @@ -3055,117 +2524,6 @@ private int describeFunction(Hive db, DescFunctionDesc descFunc) throws HiveExce return 0; } - /** - * Write the status of tables to a file. - * - * @param db - * The database in question. - * @param showTblStatus - * tables we are interested in - * @return Return 0 when execution succeeds and above 0 if it fails. - */ - private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException { - // get the tables for the desired pattern - populate the output stream - List
<Table> tbls = new ArrayList<Table>
(); - Map part = showTblStatus.getPartSpec(); - Partition par = null; - if (part != null) { - Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern()); - par = db.getPartition(tbl, part, false); - if (par == null) { - throw new HiveException("Partition " + part + " for table " - + showTblStatus.getPattern() + " does not exist."); - } - tbls.add(tbl); - } else { - LOG.debug("pattern: {}", showTblStatus.getPattern()); - List tblStr = db.getTablesForDb(showTblStatus.getDbName(), - showTblStatus.getPattern()); - SortedSet sortedTbls = new TreeSet(tblStr); - Iterator iterTbls = sortedTbls.iterator(); - while (iterTbls.hasNext()) { - // create a row per table name - String tblName = iterTbls.next(); - Table tbl = db.getTable(showTblStatus.getDbName(), tblName); - tbls.add(tbl); - } - LOG.info("Found {} table(s) matching the SHOW TABLE EXTENDED statement.", tblStr.size()); - } - - // write the results in the file - DataOutputStream outStream = getOutputStream(showTblStatus.getResFile()); - try { - formatter.showTableStatus(outStream, db, conf, tbls, part, par); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show table status"); - } finally { - IOUtils.closeStream(outStream); - } - return 0; - } - - /** - * Write the properties of a table to a file. - * - * @param db - * The database in question. - * @param showTblPrpt - * This is the table we're interested in. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int showTableProperties(Hive db, ShowTblPropertiesDesc showTblPrpt) throws HiveException { - String tableName = showTblPrpt.getTableName(); - - // show table properties - populate the output stream - Table tbl = db.getTable(tableName, false); - try { - if (tbl == null) { - String errMsg = "Table " + tableName + " does not exist"; - writeToFile(errMsg, showTblPrpt.getResFile()); - return 0; - } - - LOG.info("DDLTask: show properties for {}", tableName); - - StringBuilder builder = new StringBuilder(); - String propertyName = showTblPrpt.getPropertyName(); - if (propertyName != null) { - String propertyValue = tbl.getProperty(propertyName); - if (propertyValue == null) { - String errMsg = "Table " + tableName + " does not have property: " + propertyName; - builder.append(errMsg); - } - else { - appendNonNull(builder, propertyName, true); - appendNonNull(builder, propertyValue); - } - } - else { - Map properties = new TreeMap(tbl.getParameters()); - for (Entry entry : properties.entrySet()) { - appendNonNull(builder, entry.getKey(), true); - appendNonNull(builder, entry.getValue()); - } - } - - LOG.info("DDLTask: written data for showing properties of {}", tableName); - writeToFile(builder.toString(), showTblPrpt.getResFile()); - - } catch (FileNotFoundException e) { - LOG.info("show table properties: ", e); - return 1; - } catch (IOException e) { - LOG.info("show table properties: ", e); - return 1; - } catch (Exception e) { - throw new HiveException(e); - } - - return 0; - } - private void writeToFile(String data, String file) throws IOException { Path resFile = new Path(file); FileSystem fs = resFile.getFileSystem(conf); @@ -3182,211 +2540,7 @@ private void writeToFile(String data, String file) throws IOException { } } - /** - * Write the description of a table to a file. - * - * @param db - * The database in question. - * @param descTbl - * This is the table we're interested in. 
- * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - * @throws MetaException - */ - private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, MetaException { - String colPath = descTbl.getColumnPath(); - String tableName = descTbl.getTableName(); - - // describe the table - populate the output stream - Table tbl = db.getTable(tableName, false); - if (tbl == null) { - throw new HiveException(ErrorMsg.INVALID_TABLE, tableName); - } - Partition part = null; - if (descTbl.getPartSpec() != null) { - part = db.getPartition(tbl, descTbl.getPartSpec(), false); - if (part == null) { - throw new HiveException(ErrorMsg.INVALID_PARTITION, - StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName); - } - tbl = part.getTable(); - } - - DataOutputStream outStream = getOutputStream(descTbl.getResFile()); - try { - LOG.debug("DDLTask: got data for {}", tableName); - - List cols = null; - List colStats = null; - - Deserializer deserializer = tbl.getDeserializer(true); - if (deserializer instanceof AbstractSerDe) { - String errorMsgs = ((AbstractSerDe) deserializer).getConfigurationErrors(); - if (errorMsgs != null && !errorMsgs.isEmpty()) { - throw new SQLException(errorMsgs); - } - } - - if (colPath.equals(tableName)) { - cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ? - tbl.getCols() : part.getCols(); - - if (!descTbl.isFormatted()) { - cols.addAll(tbl.getPartCols()); - } - - if (tbl.isPartitioned() && part == null) { - // No partitioned specified for partitioned table, lets fetch all. - Map tblProps = tbl.getParameters() == null ? new HashMap() : tbl.getParameters(); - Map valueMap = new HashMap<>(); - Map stateMap = new HashMap<>(); - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - valueMap.put(stat, 0L); - stateMap.put(stat, true); - } - PartitionIterable parts = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); - int numParts = 0; - for (Partition partition : parts) { - Map props = partition.getParameters(); - Boolean state = StatsSetupConst.areBasicStatsUptoDate(props); - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - stateMap.put(stat, stateMap.get(stat) && state); - if (props != null && props.get(stat) != null) { - valueMap.put(stat, valueMap.get(stat) + Long.parseLong(props.get(stat))); - } - } - numParts++; - } - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat))); - tblProps.put(stat, valueMap.get(stat).toString()); - } - tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts)); - tbl.setParameters(tblProps); - } - } else { - if (descTbl.isFormatted()) { - // when column name is specified in describe table DDL, colPath will - // will be table_name.column_name - String colName = colPath.split("\\.")[1]; - String[] dbTab = Utilities.getDbTableName(tableName); - List colNames = new ArrayList(); - colNames.add(colName.toLowerCase()); - if (null == part) { - if (tbl.isPartitioned()) { - Map tblProps = tbl.getParameters() == null ? 
new HashMap() : tbl.getParameters(); - if (tbl.isPartitionKey(colNames.get(0))) { - FieldSchema partCol = tbl.getPartColByName(colNames.get(0)); - cols = Collections.singletonList(partCol); - PartitionIterable parts = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); - ColumnInfo ci = new ColumnInfo(partCol.getName(),TypeInfoUtils.getTypeInfoFromTypeString(partCol.getType()),null,false); - ColStatistics cs = StatsUtils.getColStatsForPartCol(ci, parts, conf); - ColumnStatisticsData data = new ColumnStatisticsData(); - ColStatistics.Range r = cs.getRange(); - StatObjectConverter.fillColumnStatisticsData(partCol.getType(), data, r == null ? null : r.minValue, r == null ? null : r.maxValue, - r == null ? null : r.minValue, r == null ? null : r.maxValue, r == null ? null : r.minValue.toString(), r == null ? null : r.maxValue.toString(), - cs.getNumNulls(), cs.getCountDistint(), null, cs.getAvgColLen(), cs.getAvgColLen(), cs.getNumTrues(), cs.getNumFalses()); - ColumnStatisticsObj cso = new ColumnStatisticsObj(partCol.getName(), partCol.getType(), data); - colStats = Collections.singletonList(cso); - StatsSetupConst.setColumnStatsState(tblProps, colNames); - } else { - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - List parts = db.getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), (short) -1); - AggrStats aggrStats = db.getAggrColStatsFor( - dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false); - colStats = aggrStats.getColStats(); - if (parts.size() == aggrStats.getPartsFound()) { - StatsSetupConst.setColumnStatsState(tblProps, colNames); - } else { - StatsSetupConst.removeColumnStatsState(tblProps, colNames); - } - } - tbl.setParameters(tblProps); - } else { - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getTableColumnStatistics( - dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false); - } - } else { - List partitions = new ArrayList(); - partitions.add(part.getName()); - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(), - dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName()); - } - } else { - cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - } - } - PrimaryKeyInfo pkInfo = null; - ForeignKeyInfo fkInfo = null; - UniqueConstraint ukInfo = null; - NotNullConstraint nnInfo = null; - DefaultConstraint dInfo = null; - CheckConstraint cInfo = null; - StorageHandlerInfo storageHandlerInfo = null; - if (descTbl.isExt() || descTbl.isFormatted()) { - pkInfo = db.getPrimaryKeys(tbl.getDbName(), tbl.getTableName()); - fkInfo = db.getForeignKeys(tbl.getDbName(), tbl.getTableName()); - ukInfo = db.getUniqueConstraints(tbl.getDbName(), tbl.getTableName()); - nnInfo = db.getNotNullConstraints(tbl.getDbName(), tbl.getTableName()); - dInfo = db.getDefaultConstraints(tbl.getDbName(), tbl.getTableName()); - cInfo = db.getCheckConstraints(tbl.getDbName(), tbl.getTableName()); - storageHandlerInfo = db.getStorageHandlerInfo(tbl); - } - fixDecimalColumnTypeName(cols); - // Information for materialized views - if (tbl.isMaterializedView()) { - final String validTxnsList = db.getConf().get(ValidTxnList.VALID_TXNS_KEY); - if (validTxnsList != null) { - final List tablesUsed = - new ArrayList<>(tbl.getCreationMetadata().getTablesUsed()); - final ValidTxnWriteIdList currentTxnWriteIds = - SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList); 
- final long defaultTimeWindow = - HiveConf.getTimeVar(db.getConf(), HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW, - TimeUnit.MILLISECONDS); - tbl.setOutdatedForRewriting(Hive.isOutdatedMaterializedView(tbl, - currentTxnWriteIds, defaultTimeWindow, tablesUsed, false)); - } - } - // In case the query is served by HiveServer2, don't pad it with spaces, - // as HiveServer2 output is consumed by JDBC/ODBC clients. - boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); - formatter.describeTable(outStream, colPath, tableName, tbl, part, - cols, descTbl.isFormatted(), descTbl.isExt(), isOutputPadded, - colStats, pkInfo, fkInfo, ukInfo, nnInfo, dInfo, cInfo, - storageHandlerInfo); - - LOG.debug("DDLTask: written data for {}", tableName); - - } catch (SQLException e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName); - } finally { - IOUtils.closeStream(outStream); - } - - return 0; - } - - /** - * Fix the type name of a column of type decimal w/o precision/scale specified. This makes - * the describe table show "decimal(10,0)" instead of "decimal" even if the type stored - * in metastore is "decimal", which is possible with previous hive. - * - * @param cols columns that to be fixed as such - */ - private static void fixDecimalColumnTypeName(List cols) { - for (FieldSchema col : cols) { - if (serdeConstants.DECIMAL_TYPE_NAME.equals(col.getType())) { - col.setType(DecimalTypeInfo.getQualifiedName(HiveDecimal.USER_DEFAULT_PRECISION, - HiveDecimal.USER_DEFAULT_SCALE)); - } - } - } - - static String writeGrantInfo(List privileges, boolean testMode) { + private String writeGrantInfo(List privileges, boolean testMode) { if (privileges == null || privileges.isEmpty()) { return ""; } @@ -3425,23 +2579,7 @@ public int compare(HivePrivilegeInfo o1, HivePrivilegeInfo o2) { return builder.toString(); } - static String writeRoleGrantsInfo(List roleGrants, boolean testMode) { - if (roleGrants == null || roleGrants.isEmpty()) { - return ""; - } - StringBuilder builder = new StringBuilder(); - //sort the list to get sorted (deterministic) output (for ease of testing) - Collections.sort(roleGrants); - for (RolePrincipalGrant roleGrant : roleGrants) { - appendNonNull(builder, roleGrant.getRoleName(), true); - appendNonNull(builder, roleGrant.isGrantOption()); - appendNonNull(builder, testMode ? -1 : roleGrant.getGrantTime() * 1000L); - appendNonNull(builder, roleGrant.getGrantorName()); - } - return builder.toString(); - } - - static String writeRolesGrantedInfo(List roles, boolean testMode) { + private String writeRolesGrantedInfo(List roles, boolean testMode) { if (roles == null || roles.isEmpty()) { return ""; } @@ -3457,11 +2595,11 @@ static String writeRolesGrantedInfo(List roles, boolean testMode) return builder.toString(); } - static StringBuilder appendNonNull(StringBuilder builder, Object value) { + private StringBuilder appendNonNull(StringBuilder builder, Object value) { return appendNonNull(builder, value, false); } - static StringBuilder appendNonNull(StringBuilder builder, Object value, boolean firstColumn) { + private StringBuilder appendNonNull(StringBuilder builder, Object value, boolean firstColumn) { if (!firstColumn) { builder.append((char)separator); } else if (builder.length() > 0) { @@ -4181,35 +3319,26 @@ private int updateColumns(Table tbl, Partition part) } /** - * Drop a given table or some partitions. DropTableDesc is currently used for both. + * Drop a given partitions. * * @param db * The database in question. 
- * @param dropTbl - * This is the table we're dropping. + * @param dropPartition + * This is the partition we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { + private void dropPartitions(Hive db, DropPartitionDesc dropPartition) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { - tbl = db.getTable(dropTbl.getTableName()); + tbl = db.getTable(dropPartition.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } - if (dropTbl.getPartSpecs() == null) { - dropTable(db, tbl, dropTbl); - } else { - dropPartitions(db, tbl, dropTbl); - } - } - - private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { - - ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); + ReplicationSpec replicationSpec = dropPartition.getReplicationSpec(); if (replicationSpec.isInReplicationScope()){ /** * ALTER TABLE DROP PARTITION ... FOR REPLICATION(x) behaves as a DROP PARTITION IF OLDER THAN x @@ -4231,7 +3360,7 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi return; } - for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()){ + for (DropPartitionDesc.PartSpec partSpec : dropPartition.getPartSpecs()){ List partitions = new ArrayList<>(); try { db.getPartitionsByExpr(tbl, partSpec.getPartSpec(), conf, partitions); @@ -4250,12 +3379,12 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi // ifExists is currently verified in DDLSemanticAnalyzer List droppedParts - = db.dropPartitions(dropTbl.getTableName(), - dropTbl.getPartSpecs(), + = db.dropPartitions(dropPartition.getTableName(), + dropPartition.getPartSpecs(), PartitionDropOptions.instance() .deleteData(true) .ifExists(true) - .purgeData(dropTbl.getIfPurge())); + .purgeData(dropPartition.getIfPurge())); for (Partition partition : droppedParts) { console.printInfo("Dropped the partition " + partition.getName()); // We have already locked the table, don't lock the partitions. 
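Illustrative note, not part of the patch: after this hunk the old dropTableOrPartitions/dropTable paths are gone from DDLTask and only partition drops remain, described by DropPartitionDesc (true DROP TABLE now travels through the new org.apache.hadoop.hive.ql.ddl framework, as the LoadTable hunk below shows). The following is a minimal sketch of how a caller builds such a partition-drop task, modelled on the LoadPartitions hunk further down; the constructor shape, the DDLWork wrapping, and the meaning of the boolean flag (ifPurge, per getIfPurge() in the hunk above) are taken from this patch, while the method name, parameter names, and wrapper class are assumed scaffolding for the example.

import java.util.HashSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

/** Sketch only: shows the DropPartitionDesc usage introduced by this patch. */
final class DropPartitionTaskSketch {

  static Task<?> partitionDropTask(Table table,
      Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecsExpr,
      ReplicationSpec replicationSpec,
      HiveConf conf) {
    // Same constructor shape as in the LoadPartitions hunk: qualified table name,
    // partition filter expressions, ifPurge, and the replication spec that may turn
    // the drop into "drop only if older than the event" semantics.
    DropPartitionDesc dropPtnDesc = new DropPartitionDesc(
        table.getFullyQualifiedName(), partSpecsExpr, true, replicationSpec);
    // Partition drops still ride the legacy DDLWork/DDLTask path; only table-level
    // drops move to the new DDLWork2/DDLTask2 dispatch in this patch.
    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), conf);
  }
}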
@@ -4263,102 +3392,6 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi } } - private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { - // This is a true DROP TABLE - if (tbl != null && dropTbl.getValidationRequired()) { - if (tbl.isView()) { - if (!dropTbl.getExpectView()) { - if (dropTbl.getIfExists()) { - return; - } - if (dropTbl.getExpectMaterializedView()) { - throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW"); - } else { - throw new HiveException("Cannot drop a view with DROP TABLE"); - } - } - } else if (tbl.isMaterializedView()) { - if (!dropTbl.getExpectMaterializedView()) { - if (dropTbl.getIfExists()) { - return; - } - if (dropTbl.getExpectView()) { - throw new HiveException("Cannot drop a materialized view with DROP VIEW"); - } else { - throw new HiveException("Cannot drop a materialized view with DROP TABLE"); - } - } - } else { - if (dropTbl.getExpectView()) { - if (dropTbl.getIfExists()) { - return; - } - throw new HiveException( - "Cannot drop a base table with DROP VIEW"); - } else if (dropTbl.getExpectMaterializedView()) { - if (dropTbl.getIfExists()) { - return; - } - throw new HiveException( - "Cannot drop a base table with DROP MATERIALIZED VIEW"); - } - } - } - - ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); - if ((tbl!= null) && replicationSpec.isInReplicationScope()){ - /** - * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely - * matches a DROP TABLE IF OLDER THAN(x) semantic. - * - * Ideally, commands executed under the scope of replication need to be idempotent and resilient - * to repeats. What can happen, sometimes, is that a drone processing a replication task can - * have been abandoned for not returning in time, but still execute its task after a while, - * which should not result in it mucking up data that has been impressed later on. So, for eg., - * if we create partition P1, followed by droppping it, followed by creating it yet again, - * the replication of that drop should not drop the newer partition if it runs after the destination - * object is already in the newer state. - * - * Thus, we check the replicationSpec.allowEventReplacementInto to determine whether or not we can - * drop the object in question(will return false if object is newer than the event, true if not) - * - * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP - * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must - * do one more thing - if it does not drop the table because the table is in a newer state, it must - * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL - * acts like a recursive DROP TABLE IF OLDER. - */ - if (!replicationSpec.allowEventReplacementInto(tbl.getParameters())){ - // Drop occured as part of replicating a drop, but the destination - // table was newer than the event being replicated. Ignore, but drop - // any partitions inside that are older. 
- if (tbl.isPartitioned()){ - - PartitionIterable partitions = new PartitionIterable(db,tbl,null, - conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); - - for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())){ - db.dropPartition(tbl.getDbName(),tbl.getTableName(),p.getValues(),true); - } - } - LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", dropTbl.getTableName()); - return; // table is newer, leave it be. - } - } - - // drop the table - // TODO: API w/catalog name - db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); - if (tbl != null) { - // Remove from cache if it is a materialized view - if (tbl.isMaterializedView()) { - HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl); - } - // We have already locked the table in DDLSemanticAnalyzer, don't do it again here - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - } - } - /** * Update last_modified_by and last_modified_time parameters in parameter map. * @@ -4375,10 +3408,6 @@ private boolean updateModifiedParameters(Map params, HiveConf co return true; } - private void validateSerDe(String serdeName) throws HiveException { - validateSerDe(serdeName, conf); - } - /** * Check if the given serde is valid. */ @@ -4396,257 +3425,6 @@ public static void validateSerDe(String serdeName, HiveConf conf) throws HiveExc } /** - * Create a new table. - * - * @param db - * The database in question. - * @param crtTbl - * This is the table we're creating. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. - */ - private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { - // create the table - Table tbl = crtTbl.toTable(conf); - List primaryKeys = crtTbl.getPrimaryKeys(); - List foreignKeys = crtTbl.getForeignKeys(); - List uniqueConstraints = crtTbl.getUniqueConstraints(); - List notNullConstraints = crtTbl.getNotNullConstraints(); - List defaultConstraints = crtTbl.getDefaultConstraints(); - List checkConstraints = crtTbl.getCheckConstraints(); - LOG.debug("creating table {} on {}",tbl.getFullyQualifiedName(),tbl.getDataLocation()); - - if (crtTbl.getReplicationSpec().isInReplicationScope() && (!crtTbl.getReplaceMode())){ - // if this is a replication spec, then replace-mode semantics might apply. - // if we're already asking for a table replacement, then we can skip this check. - // however, otherwise, if in replication scope, and we've not been explicitly asked - // to replace, we should check if the object we're looking at exists, and if so, - // trigger replace-mode semantics. - Table existingTable = db.getTable(tbl.getDbName(), tbl.getTableName(), false); - if (existingTable != null){ - if (crtTbl.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())){ - crtTbl.setReplaceMode(true); // we replace existing table. - ReplicationSpec.copyLastReplId(existingTable.getParameters(), tbl.getParameters()); - } else { - LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", - crtTbl.getTableName()); - return 0; // no replacement, the existing table state is newer than our update. 
- } - } - } - - // create the table - if (crtTbl.getReplaceMode()) { - ReplicationSpec replicationSpec = crtTbl.getReplicationSpec(); - long writeId = 0; - EnvironmentContext environmentContext = null; - if (replicationSpec != null && replicationSpec.isInReplicationScope()) { - if (replicationSpec.isMigratingToTxnTable()) { - // for migration we start the transaction and allocate write id in repl txn task for migration. - String writeIdPara = conf.get(ReplUtils.REPL_CURRENT_TBL_WRITE_ID); - if (writeIdPara == null) { - throw new HiveException("DDLTask : Write id is not set in the config by open txn task for migration"); - } - writeId = Long.parseLong(writeIdPara); - } else { - writeId = crtTbl.getReplWriteId(); - } - - // In case of replication statistics is obtained from the source, so do not update those - // on replica. Since we are not replicating statisics for transactional tables, do not do - // so for transactional tables right now. - if (!AcidUtils.isTransactionalTable(crtTbl)) { - environmentContext = new EnvironmentContext(); - environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - } - } - - // replace-mode creates are really alters using CreateTableDesc. - db.alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, false, - environmentContext, true, writeId); - } else { - if ((foreignKeys != null && foreignKeys.size() > 0) || - (primaryKeys != null && primaryKeys.size() > 0) || - (uniqueConstraints != null && uniqueConstraints.size() > 0) || - (notNullConstraints != null && notNullConstraints.size() > 0) || - (checkConstraints!= null && checkConstraints.size() > 0) || - defaultConstraints != null && defaultConstraints.size() > 0) { - db.createTable(tbl, crtTbl.getIfNotExists(), primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); - } else { - db.createTable(tbl, crtTbl.getIfNotExists()); - } - Long mmWriteId = crtTbl.getInitialMmWriteId(); - if (crtTbl.isCTAS() || mmWriteId != null) { - Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName()); - if (crtTbl.isCTAS()) { - DataContainer dc = new DataContainer(createdTable.getTTable()); - queryState.getLineageState().setLineage( - createdTable.getPath(), dc, createdTable.getCols() - ); - } - } - } - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - return 0; - } - - - /** - * Create a new table like an existing table. - * - * @param db - * The database in question. - * @param crtTbl - * This is the table we're creating. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - * Throws this exception if an unexpected error occurs. 
- */ - private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws Exception { - // Get the existing table - Table oldtbl = db.getTable(crtTbl.getLikeTableName()); - Table tbl; - if (oldtbl.getTableType() == TableType.VIRTUAL_VIEW || - oldtbl.getTableType() == TableType.MATERIALIZED_VIEW) { - String targetTableName = crtTbl.getTableName(); - tbl=db.newTable(targetTableName); - - if (crtTbl.getTblProps() != null) { - tbl.getTTable().getParameters().putAll(crtTbl.getTblProps()); - } - - tbl.setTableType(TableType.MANAGED_TABLE); - - if (crtTbl.isExternal()) { - tbl.setProperty("EXTERNAL", "TRUE"); - tbl.setTableType(TableType.EXTERNAL_TABLE); - // partition discovery is on by default - tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - } - - tbl.setFields(oldtbl.getCols()); - tbl.setPartCols(oldtbl.getPartCols()); - - if (crtTbl.getDefaultSerName() == null) { - LOG.info("Default to LazySimpleSerDe for table {}", targetTableName); - tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - } else { - // let's validate that the serde exists - validateSerDe(crtTbl.getDefaultSerName()); - tbl.setSerializationLib(crtTbl.getDefaultSerName()); - } - - if (crtTbl.getDefaultSerdeProps() != null) { - Iterator> iter = crtTbl.getDefaultSerdeProps().entrySet() - .iterator(); - while (iter.hasNext()) { - Entry m = iter.next(); - tbl.setSerdeParam(m.getKey(), m.getValue()); - } - } - - tbl.setInputFormatClass(crtTbl.getDefaultInputFormat()); - tbl.setOutputFormatClass(crtTbl.getDefaultOutputFormat()); - - tbl.getTTable().getSd().setInputFormat( - tbl.getInputFormatClass().getName()); - tbl.getTTable().getSd().setOutputFormat( - tbl.getOutputFormatClass().getName()); - } else { - tbl=oldtbl; - - // find out database name and table name of target table - String targetTableName = crtTbl.getTableName(); - String[] names = Utilities.getDbTableName(targetTableName); - - tbl.setDbName(names[0]); - tbl.setTableName(names[1]); - - // using old table object, hence reset the owner to current user for new table. - tbl.setOwner(SessionState.getUserFromAuthenticator()); - - if (crtTbl.getLocation() != null) { - tbl.setDataLocation(new Path(crtTbl.getLocation())); - } else { - tbl.unsetDataLocation(); - } - - Class serdeClass = oldtbl.getDeserializerClass(); - - Map params = tbl.getParameters(); - // We should copy only those table parameters that are specified in the config. 
- SerDeSpec spec = AnnotationUtils.getAnnotation(serdeClass, SerDeSpec.class); - String paramsStr = HiveConf.getVar(conf, HiveConf.ConfVars.DDL_CTL_PARAMETERS_WHITELIST); - - Set retainer = new HashSet(); - // for non-native table, property storage_handler should be retained - retainer.add(META_TABLE_STORAGE); - if (spec != null && spec.schemaProps() != null) { - retainer.addAll(Arrays.asList(spec.schemaProps())); - } - if (paramsStr != null) { - retainer.addAll(Arrays.asList(paramsStr.split(","))); - } - if (!retainer.isEmpty()) { - params.keySet().retainAll(retainer); - } else { - params.clear(); - } - - if (crtTbl.getTblProps() != null) { - params.putAll(crtTbl.getTblProps()); - } - - if (crtTbl.isUserStorageFormat()) { - tbl.setInputFormatClass(crtTbl.getDefaultInputFormat()); - tbl.setOutputFormatClass(crtTbl.getDefaultOutputFormat()); - tbl.getTTable().getSd().setInputFormat( - tbl.getInputFormatClass().getName()); - tbl.getTTable().getSd().setOutputFormat( - tbl.getOutputFormatClass().getName()); - if (crtTbl.getDefaultSerName() == null) { - LOG.info("Default to LazySimpleSerDe for like table {}", targetTableName); - tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - } else { - // let's validate that the serde exists - validateSerDe(crtTbl.getDefaultSerName()); - tbl.setSerializationLib(crtTbl.getDefaultSerName()); - } - } - - tbl.getTTable().setTemporary(crtTbl.isTemporary()); - tbl.getTTable().unsetId(); - - if (crtTbl.isExternal()) { - tbl.setProperty("EXTERNAL", "TRUE"); - tbl.setTableType(TableType.EXTERNAL_TABLE); - // partition discovery is on by default - tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - } else { - tbl.getParameters().remove("EXTERNAL"); - } - } - - // If location is specified - ensure that it is a full qualified name - if (DDLTask.doesTableNeedLocation(tbl)) { - makeLocationQualified(tbl.getDbName(), tbl, conf); - } - - if (crtTbl.getLocation() == null && !tbl.isPartitioned() - && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), - MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); - } - - // create the table - db.createTable(tbl, crtTbl.getIfNotExists()); - addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); - return 0; - } - - /** * Create a new view. 
* * @param db @@ -4725,50 +3503,6 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { return 0; } - private int truncateTable(Hive db, TruncateTableDesc truncateTableDesc) throws HiveException { - if (truncateTableDesc.getColumnIndexes() != null) { - ColumnTruncateWork truncateWork = new ColumnTruncateWork( - truncateTableDesc.getColumnIndexes(), truncateTableDesc.getInputDir(), - truncateTableDesc.getOutputDir()); - truncateWork.setListBucketingCtx(truncateTableDesc.getLbCtx()); - truncateWork.setMapperCannotSpanPartns(true); - DriverContext driverCxt = new DriverContext(); - ColumnTruncateTask taskExec = new ColumnTruncateTask(); - taskExec.initialize(queryState, null, driverCxt, null); - taskExec.setWork(truncateWork); - taskExec.setQueryPlan(this.getQueryPlan()); - subtask = taskExec; - int ret = taskExec.execute(driverCxt); - if (subtask.getException() != null) { - setException(subtask.getException()); - } - return ret; - } - - String tableName = truncateTableDesc.getTableName(); - Map partSpec = truncateTableDesc.getPartSpec(); - - ReplicationSpec replicationSpec = truncateTableDesc.getReplicationSpec(); - if (!allowOperationInReplicationScope(db, tableName, partSpec, replicationSpec)) { - // no truncate, the table is missing either due to drop/rename which follows the truncate. - // or the existing table is newer than our update. - if (LOG.isDebugEnabled()) { - LOG.debug("DDLTask: Truncate Table/Partition is skipped as table {} / partition {} is newer than update", - tableName, - (partSpec == null) ? "null" : FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values()))); - } - return 0; - } - - try { - db.truncateTable(tableName, partSpec, - replicationSpec != null && replicationSpec.isInReplicationScope() ? truncateTableDesc.getWriteId() : 0L); - } catch (Exception e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR); - } - return 0; - } - private int exchangeTablePartition(Hive db, AlterTableExchangePartition exchangePartition) throws HiveException { Map partitionSpecs = exchangePartition.getPartitionSpecs(); @@ -4803,32 +3537,6 @@ public String getName() { return "DDL"; } - /** - * Make location in specified sd qualified. - * - * @param databaseName - * Database name. - */ - public static void makeLocationQualified(String databaseName, Table table, HiveConf conf) throws HiveException { - Path path = null; - StorageDescriptor sd = table.getTTable().getSd(); - // If the table's location is currently unset, it is left unset, allowing the metastore to - // fill in the table's location. - // Note that the previous logic for some reason would make a special case if the DB was the - // default database, and actually attempt to generate a location. - // This seems incorrect and uncessary, since the metastore is just as able to fill in the - // default table location in the case of the default DB, as it is for non-default DBs. - if (sd.isSetLocation()) - { - path = new Path(sd.getLocation()); - } - - if (path != null) - { - sd.setLocation(Utilities.getQualifiedPath(conf, path)); - } - } - /** * Validate if the given table/partition is eligible for update * @@ -4868,21 +3576,6 @@ private boolean allowOperationInReplicationScope(Hive db, String tableName, return false; } - public static boolean doesTableNeedLocation(Table tbl) { - // TODO: If we are ok with breaking compatibility of existing 3rd party StorageHandlers, - // this method could be moved to the HiveStorageHandler interface. 
- boolean retval = true; - if (tbl.getStorageHandler() != null) { - // TODO: why doesn't this check class name rather than toString? - String sh = tbl.getStorageHandler().toString(); - retval = !sh.equals("org.apache.hadoop.hive.hbase.HBaseStorageHandler") - && !sh.equals(Constants.DRUID_HIVE_STORAGE_HANDLER_ID) - && !sh.equals(Constants.JDBC_HIVE_STORAGE_HANDLER_ID) - && !sh.equals("org.apache.hadoop.hive.accumulo.AccumuloStorageHandler"); - } - return retval; - } - private int remFirstIncPendFlag(Hive hive, ReplRemoveFirstIncLoadPendFlagDesc desc) throws HiveException, TException { String dbNameOrPattern = desc.getDatabaseName(); String tableNameOrPattern = desc.getTableName(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index c1773c9..0add38b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; @@ -349,8 +349,8 @@ private Path locationOnReplicaWarehouse(Table table, AddPartitionDesc.OnePartiti Map> partSpecsExpr = ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec)); if (partSpecsExpr.size() > 0) { - DropTableDesc dropPtnDesc = new DropTableDesc(table.getFullyQualifiedName(), - partSpecsExpr, null, true, event.replicationSpec()); + DropPartitionDesc dropPtnDesc = new DropPartitionDesc(table.getFullyQualifiedName(), partSpecsExpr, true, + event.replicationSpec()); dropPtnTask = TaskFactory.get( new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf ); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index 3b0b67a..854c277 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -46,7 +48,6 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; @@ -325,6 +326,6 @@ static TableLocationTuple tableLocation(ImportTableDesc tblDesc, Database parent assert(table != null); DropTableDesc dropTblDesc = new 
DropTableDesc(table.getFullyQualifiedName(), table.getTableType(), true, false, event.replicationSpec()); - return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf); + return TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 3961baa..50a233d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.Entity; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -68,7 +69,6 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.HadoopShims; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 4aea872..800d80a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -41,13 +41,13 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; import org.apache.thrift.TException; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 43dba73..bb46bf9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -26,11 +26,11 @@ import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import java.util.List; diff --git 
ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java index 0abec56..0e148ed 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -38,8 +40,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; /** * An implementation HiveTxnManager that includes internal methods that all diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 17576ff..33d157d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -127,7 +127,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; @@ -3363,7 +3363,7 @@ public boolean dropPartition(String dbName, String tableName, List partV public List dropPartitions(Table table, ListpartDirNames, boolean deleteData, boolean ifExists) throws HiveException { // partitions to be dropped in this batch - List partSpecs = new ArrayList<>(partDirNames.size()); + List partSpecs = new ArrayList<>(partDirNames.size()); // parts of the partition String[] parts = null; @@ -3413,7 +3413,7 @@ public boolean dropPartition(String dbName, String tableName, List partV } // Add the expression to partition specification - partSpecs.add(new DropTableDesc.PartSpec(expr, partSpecKey)); + partSpecs.add(new DropPartitionDesc.PartSpec(expr, partSpecKey)); // Increment dropKey to get a new key for hash map ++partSpecKey; @@ -3423,14 +3423,14 @@ public boolean dropPartition(String dbName, String tableName, List partV return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); } - public List dropPartitions(String tblName, List partSpecs, + public List dropPartitions(String tblName, List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { String[] names = Utilities.getDbTableName(tblName); return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); } public List dropPartitions(String dbName, String tblName, - List partSpecs, boolean deleteData, + List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { return dropPartitions(dbName, tblName, partSpecs, PartitionDropOptions.instance() @@ -3438,19 +3438,19 @@ public boolean dropPartition(String 
dbName, String tableName, List partV .ifExists(ifExists)); } - public List dropPartitions(String tblName, List partSpecs, + public List dropPartitions(String tblName, List partSpecs, PartitionDropOptions dropOptions) throws HiveException { String[] names = Utilities.getDbTableName(tblName); return dropPartitions(names[0], names[1], partSpecs, dropOptions); } public List dropPartitions(String dbName, String tblName, - List partSpecs, PartitionDropOptions dropOptions) throws HiveException { + List partSpecs, PartitionDropOptions dropOptions) throws HiveException { try { Table tbl = getTable(dbName, tblName); List> partExprs = new ArrayList<>(partSpecs.size()); - for (DropTableDesc.PartSpec partSpec : partSpecs) { + for (DropPartitionDesc.PartSpec partSpec : partSpecs) { partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(partSpec.getPrefixLength(), SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec()))); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index 4180dc4..e181b59 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; import org.apache.hadoop.hive.ql.metadata.CheckConstraint; import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; @@ -53,7 +54,6 @@ import org.apache.hadoop.hive.ql.metadata.UniqueConstraint; import org.apache.hadoop.hive.ql.metadata.UniqueConstraint.UniqueConstraintCol; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo.ForeignKeyCol; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.serde2.io.DateWritableV2; import org.apache.hive.common.util.HiveStringUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java index fbeb9c8..07ace85 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hive.common.util.HiveStringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,6 +47,7 @@ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.CheckConstraint; import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java 
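A minimal sketch of the Hive.dropPartitions(...) calling pattern after the DropTableDesc.PartSpec -> DropPartitionDesc.PartSpec move shown in the Hive.java hunk above. The class and variable names are illustrative, the PartSpec constructor arguments (pruning expression plus prefix-length key) follow the hunk, and the List<Partition> return type is assumed from the existing API; this is not code from the patch.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.plan.DropPartitionDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

public class DropPartitionsSketch {
  // Drops the partitions matched by a single pruning expression, mirroring how
  // Hive.dropPartitions now collects DropPartitionDesc.PartSpec objects.
  static List<Partition> dropByExpr(Hive db, String dbName, String tblName,
      ExprNodeGenericFuncDesc partExpr, int prefixLength) throws HiveException {
    List<DropPartitionDesc.PartSpec> partSpecs = new ArrayList<>();
    // Each spec pairs a partition expression with the prefix-length key, as in the hunk.
    partSpecs.add(new DropPartitionDesc.PartSpec(partExpr, prefixLength));
    return db.dropPartitions(dbName, tblName, partSpecs, true /* deleteData */, true /* ifExists */);
  }
}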
index 4b2958a..960dd34 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java @@ -36,7 +36,10 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.exec.DDLTask; +import org.apache.hadoop.hive.ql.ddl.DDLTask2; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.StatsTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -47,9 +50,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.ExportWork; import org.apache.hadoop.hive.ql.session.SessionState; @@ -151,7 +152,7 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException { try { ReadEntity dbForTmpTable = new ReadEntity(db.getDatabase(exportTable.getDbName())); inputs.add(dbForTmpTable); //so the plan knows we are 'reading' this db - locks, security... - DDLTask createTableTask = (DDLTask) TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), ctlt), conf); + DDLTask2 createTableTask = (DDLTask2) TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), ctlt), conf); createTableTask.setConf(conf); //above get() doesn't set it createTableTask.execute(new DriverContext(new Context(conf))); newTable = db.getTable(newTableName); @@ -199,7 +200,7 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException { // {@link DDLSemanticAnalyzer#analyzeDropTable(ASTNode ast, TableType expectedType) ReplicationSpec replicationSpec = new ReplicationSpec(); DropTableDesc dropTblDesc = new DropTableDesc(newTableName, TableType.MANAGED_TABLE, false, true, replicationSpec); - Task dropTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), conf); + Task dropTask = TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), conf); exportTask.addDependentTask(dropTask); markReadEntityForUpdate(); if (ctx.isExplainPlan()) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 4a542ae..baf6356 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -76,9 +76,19 @@ import org.apache.hadoop.hive.ql.ddl.database.DescDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.DropDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.LockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.database.ShowCreateDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.ddl.database.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.ddl.database.UnlockDatabaseDesc; +import org.apache.hadoop.hive.ql.ddl.table.DescTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.LockTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableDesc; 
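A minimal sketch of the DDLWork2 wiring used in the AcidExportSemanticAnalyzer hunk above: a single descriptor from org.apache.hadoop.hive.ql.ddl.table is wrapped in DDLWork2, turned into a task via TaskFactory, and chained onto the export task. The surrounding class and method names are illustrative; the DropTableDesc constructor and the false/true (ifExists/ifPurge) flags mirror the hunk.

import java.io.Serializable;
import java.util.HashSet;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

public class DropTempTableTaskSketch {
  // Chains a drop of the temporary export table so it runs after the export task completes.
  static void chainDropTask(Task<? extends Serializable> exportTask, String tmpTableName, HiveConf conf) {
    DropTableDesc dropTblDesc =
        new DropTableDesc(tmpTableName, TableType.MANAGED_TABLE, false, true, new ReplicationSpec());
    Task<? extends Serializable> dropTask =
        TaskFactory.get(new DDLWork2(new HashSet<>(), new HashSet<>(), dropTblDesc), conf);
    exportTask.addDependentTask(dropTask);
  }
}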
+import org.apache.hadoop.hive.ql.ddl.table.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTablesDesc; +import org.apache.hadoop.hive.ql.ddl.table.ShowTablePropertiesDesc; +import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.UnlockTableDesc; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -131,9 +141,8 @@ import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; @@ -145,7 +154,6 @@ import org.apache.hadoop.hive.ql.plan.KillQueryDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; @@ -155,21 +163,14 @@ import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowConfDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; -import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; -import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde.serdeConstants; @@ -1445,8 +1446,7 @@ private void analyzeDropTable(ASTNode ast, TableType expectedType) boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null); DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectedType, ifExists, ifPurge, replicationSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - dropTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), dropTblDesc))); } private void analyzeTruncateTable(ASTNode ast) throws SemanticException { @@ -1493,7 +1493,7 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { setAcidDdlDesc(truncateTblDesc); } - DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc); + DDLWork2 ddlWork = new DDLWork2(getInputs(), getOutputs(), truncateTblDesc); Task truncateTask = TaskFactory.get(ddlWork); // Is this a truncate column command @@ 
-2518,26 +2518,26 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { validateTable(tableName, partSpec); } - DescTableDesc descTblDesc = new DescTableDesc( - ctx.getResFile(), tableName, partSpec, colPath); - boolean showColStats = false; + boolean isFormatted = false; + boolean isExt = false; if (ast.getChildCount() == 2) { int descOptions = ast.getChild(1).getType(); - descTblDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED); - descTblDesc.setExt(descOptions == HiveParser.KW_EXTENDED); + isFormatted = descOptions == HiveParser.KW_FORMATTED; + isExt = descOptions == HiveParser.KW_EXTENDED; // in case of "DESCRIBE FORMATTED tablename column_name" statement, colPath // will contain tablename.column_name. If column_name is not specified // colPath will be equal to tableName. This is how we can differentiate // if we are describing a table or column - if (!colPath.equalsIgnoreCase(tableName) && descTblDesc.isFormatted()) { + if (!colPath.equalsIgnoreCase(tableName) && isFormatted) { showColStats = true; } } inputs.add(new ReadEntity(getTable(tableName))); - Task ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - descTblDesc)); + + DescTableDesc descTblDesc = new DescTableDesc(ctx.getResFile(), tableName, partSpec, colPath, isExt, isFormatted); + Task ddlTask = TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), descTblDesc)); rootTasks.add(ddlTask); String schema = DescTableDesc.getSchema(showColStats); setFetchTask(createFetchTask(schema)); @@ -2620,14 +2620,12 @@ private void analyzeShowPartitions(ASTNode ast) throws SemanticException { private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException { String dbName = getUnescapedName((ASTNode)ast.getChild(0)); - ShowCreateDatabaseDesc showCreateDbDesc = - new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString()); + ShowCreateDatabaseDesc showCreateDbDesc = new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString()); Database database = getDatabase(dbName); inputs.add(new ReadEntity(database)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showCreateDbDesc))); - setFetchTask(createFetchTask(showCreateDbDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showCreateDbDesc))); + setFetchTask(createFetchTask(ShowCreateDatabaseDesc.SCHEMA)); } @@ -2638,9 +2636,8 @@ private void analyzeShowCreateTable(ASTNode ast) throws SemanticException { Table tab = getTable(tableName); inputs.add(new ReadEntity(tab)); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showCreateTblDesc))); - setFetchTask(createFetchTask(showCreateTblDesc.getSchema())); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showCreateTblDesc))); + setFetchTask(createFetchTask(ShowCreateTableDesc.SCHEMA)); } private void analyzeShowDatabases(ASTNode ast) throws SemanticException { @@ -2686,8 +2683,7 @@ private void analyzeShowTables(ASTNode ast) throws SemanticException { showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames, tableTypeFilter, isExtended); inputs.add(new ReadEntity(getDatabase(dbName))); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblsDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblsDesc))); setFetchTask(createFetchTask(showTblsDesc.getSchema())); } @@ -2763,15 +2759,13 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { validateTable(tableNames, partSpec); } - 
showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, - tableNames, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblStatusDesc))); - setFetchTask(createFetchTask(showTblStatusDesc.getSchema())); + showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, tableNames, partSpec); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblStatusDesc))); + setFetchTask(createFetchTask(ShowTableStatusDesc.SCHEMA)); } private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { - ShowTblPropertiesDesc showTblPropertiesDesc; + ShowTablePropertiesDesc showTblPropertiesDesc; String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String propertyName = null; if (ast.getChildCount() > 1) { @@ -2781,11 +2775,9 @@ private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { String tableNames = getDotName(qualified); validateTable(tableNames, null); - showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames, - propertyName); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showTblPropertiesDesc))); - setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema())); + showTblPropertiesDesc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableNames, propertyName); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showTblPropertiesDesc))); + setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA)); } /** @@ -2920,8 +2912,7 @@ private void analyzeShowViews(ASTNode ast) throws SemanticException { assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); dbName = unescapeIdentifier(ast.getChild(1).getText()); validateDatabase(dbName); - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showViewsDesc.setType(TableType.VIRTUAL_VIEW); + showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW); break; case 3: // Uses a pattern and specifies a DB assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); @@ -2931,13 +2922,11 @@ private void analyzeShowViews(ASTNode ast) throws SemanticException { showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, viewNames, TableType.VIRTUAL_VIEW); break; default: // No pattern or DB - showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showViewsDesc.setType(TableType.VIRTUAL_VIEW); + showViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.VIRTUAL_VIEW); break; } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showViewsDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showViewsDesc))); setFetchTask(createFetchTask(showViewsDesc.getSchema())); } @@ -2960,8 +2949,7 @@ private void analyzeShowMaterializedViews(ASTNode ast) throws SemanticException assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); dbName = unescapeIdentifier(ast.getChild(1).getText()); validateDatabase(dbName); - showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showMaterializedViewsDesc.setType(TableType.MATERIALIZED_VIEW); + showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW); break; case 3: // Uses a pattern and specifies a DB assert (ast.getChild(0).getType() == HiveParser.TOK_FROM); @@ -2972,13 +2960,11 @@ private void analyzeShowMaterializedViews(ASTNode ast) throws SemanticException ctx.getResFile(), dbName, materializedViewNames, 
TableType.MATERIALIZED_VIEW); break; default: // No pattern or DB - showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName); - showMaterializedViewsDesc.setType(TableType.MATERIALIZED_VIEW); + showMaterializedViewsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, TableType.MATERIALIZED_VIEW); break; } - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - showMaterializedViewsDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), showMaterializedViewsDesc))); setFetchTask(createFetchTask(showMaterializedViewsDesc.getSchema())); } @@ -3005,10 +2991,8 @@ private void analyzeLockTable(ASTNode ast) } LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, partSpec, - HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); - lockTblDesc.setQueryStr(this.ctx.getCmd()); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - lockTblDesc))); + HiveConf.getVar(conf, ConfVars.HIVEQUERYID), ctx.getCmd()); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), lockTblDesc))); // Need to initialize the lock manager ctx.setNeedLockMgr(true); @@ -3107,8 +3091,7 @@ private void analyzeUnlockTable(ASTNode ast) } UnlockTableDesc unlockTblDesc = new UnlockTableDesc(tableName, partSpec); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - unlockTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), unlockTblDesc))); // Need to initialize the lock manager ctx.setNeedLockMgr(true); @@ -3438,9 +3421,8 @@ private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists); - DropTableDesc dropTblDesc = - new DropTableDesc(getDotName(qualified), partSpecs, expectView ? 
TableType.VIRTUAL_VIEW : null, - mustPurge, replicationSpec); + DropPartitionDesc dropTblDesc = + new DropPartitionDesc(getDotName(qualified), partSpecs, mustPurge, replicationSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc))); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index b6b4f58..cb9584c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.ReplCopyTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -58,7 +60,6 @@ import org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType; @@ -565,7 +566,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, ReplicationSpec replicationSpec) { DropTableDesc dropTblDesc = new DropTableDesc(table.getTableName(), table.getTableType(), true, false, replicationSpec); - return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf()); + return TaskFactory.get(new DDLWork2(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf()); } private static Task alterTableTask(ImportTableDesc tableDesc, diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 7b30b59..77e1818 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.QueryProperties; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.GroupByOperator; @@ -43,7 +44,6 @@ import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java deleted file mode 100644 index 2c8e1e1..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/parse/PreInsertTableDesc.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.parse; - -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.DDLDesc; -import org.apache.hadoop.hive.ql.plan.Explain; - -@Explain(displayName = "Pre-Insert task", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED }) -public class PreInsertTableDesc extends DDLDesc { - private final boolean isOverwrite; - private final Table table; - - public PreInsertTableDesc(Table table, boolean overwrite) { - this.table = table; - this.isOverwrite = overwrite; - } - - public Table getTable() { - return table; - } - - public boolean isOverwrite() { - return isOverwrite; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java index a2f6fbb..0405ee8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java @@ -30,8 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; /** diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 05257c9..18dbbb2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -98,6 +98,10 @@ import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.cache.results.CacheUsage; import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.ddl.table.PreInsertTableDesc; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.ArchiveUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; @@ -190,8 +194,6 @@ import org.apache.hadoop.hive.ql.plan.AggregationDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; @@ -8079,7 +8081,7 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, private void createPreInsertDesc(Table table, boolean overwrite) { PreInsertTableDesc 
preInsertTableDesc = new PreInsertTableDesc(table, overwrite); this.rootTasks - .add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc))); + .add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), preInsertTableDesc))); } @@ -12488,10 +12490,10 @@ void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticExce if (optionalTezTask.isPresent()) { final TezTask tezTask = optionalTezTask.get(); rootTasks.stream() - .filter(task -> task.getWork() instanceof DDLWork) - .map(task -> (DDLWork) task.getWork()) - .filter(ddlWork -> ddlWork.getPreInsertTableDesc() != null) - .map(ddlWork -> ddlWork.getPreInsertTableDesc()) + .filter(task -> task.getWork() instanceof DDLWork2) + .map(task -> (DDLWork2) task.getWork()) + .filter(ddlWork -> ddlWork.getDDLDesc() != null) + .map(ddlWork -> (PreInsertTableDesc)ddlWork.getDDLDesc()) .map(ddlPreInsertTask -> new InsertCommitHookDesc(ddlPreInsertTask.getTable(), ddlPreInsertTask.isOverwrite())) .forEach(insertCommitHookDesc -> tezTask.addDependentTask( @@ -13434,8 +13436,7 @@ ASTNode analyzeCreateTable( crtTblDesc.validate(conf); // outputs is empty, which means this create table happens in the current // database. - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - crtTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTblDesc))); break; case ctt: // CREATE TRANSACTIONAL TABLE if (isExt) { @@ -13459,7 +13460,7 @@ ASTNode analyzeCreateTable( crtTranTblDesc.validate(conf); // outputs is empty, which means this create table happens in the current // database. - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTranTblDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTranTblDesc))); break; case CTLT: // create table like @@ -13478,8 +13479,7 @@ ASTNode analyzeCreateTable( storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - crtTblLikeDesc))); + rootTasks.add(TaskFactory.get(new DDLWork2(getInputs(), getOutputs(), crtTblLikeDesc))); break; case CTAS: // create table as select diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 8a51e21..0b6ff52 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.DDLTask; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; @@ -59,7 +61,6 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec; import org.apache.hadoop.hive.ql.plan.BasicStatsWork; import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.FetchWork; @@ -358,8 +359,7 @@ public void compile(final ParseContext pCtx, // generate a DDL task and make it a dependent task of the leaf 
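A minimal sketch of the consumer-side lookup pattern introduced in the SemanticAnalyzer hunk above: DDLWork2 exposes its single descriptor through getDDLDesc(), so callers filter root tasks by work type and cast the descriptor, instead of calling a per-desc getter on DDLWork. The extra instanceof filter makes this sketch defensive; the patch itself casts directly after a null check.

import java.io.Serializable;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hive.ql.ddl.DDLWork2;
import org.apache.hadoop.hive.ql.ddl.table.PreInsertTableDesc;
import org.apache.hadoop.hive.ql.exec.Task;

public class DdlDescLookupSketch {
  // Returns the first PreInsertTableDesc found among the root tasks, if any.
  static Optional<PreInsertTableDesc> findPreInsertDesc(List<Task<? extends Serializable>> rootTasks) {
    return rootTasks.stream()
        .filter(task -> task.getWork() instanceof DDLWork2)
        .map(task -> (DDLWork2) task.getWork())
        .map(DDLWork2::getDDLDesc)
        .filter(desc -> desc instanceof PreInsertTableDesc)
        .map(desc -> (PreInsertTableDesc) desc)
        .findFirst();
  }
}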
CreateTableDesc crtTblDesc = pCtx.getCreateTable(); crtTblDesc.validate(conf); - Task crtTblTask = TaskFactory.get(new DDLWork( - inputs, outputs, crtTblDesc)); + Task crtTblTask = TaskFactory.get(new DDLWork2(inputs, outputs, crtTblDesc)); patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames())); } else if (pCtx.getQueryProperties().isMaterializedView()) { // generate a DDL task and make it a dependent task of the leaf diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java index b95a35a..5e88b6e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropPartitionDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import java.io.Serializable; @@ -43,8 +43,8 @@ Map> partSpecs = ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions()); if (partSpecs.size() > 0) { - DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + actualTblName, - partSpecs, null, true, context.eventOnlyReplicationSpec()); + DropPartitionDesc dropPtnDesc = new DropPartitionDesc(actualDbName + "." + actualTblName, partSpecs, true, + context.eventOnlyReplicationSpec()); Task dropPtnTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf ); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java index 62784e9..edef74e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message; import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.DropTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; import java.io.Serializable; import java.util.Collections; @@ -39,8 +39,8 @@ actualDbName + "." 
+ actualTblName, null, true, true, context.eventOnlyReplicationSpec(), false ); - Task dropTableTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf + Task dropTableTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf ); context.log.debug( "Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName() diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java index dec6ed5..05a9f91 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java @@ -19,12 +19,12 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import java.io.Serializable; import java.util.Iterator; @@ -59,8 +59,8 @@ actualDbName + "." + actualTblName, partSpec, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); - Task truncatePtnTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); + Task truncatePtnTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, partSpec); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java index f037cbb..5ef66fa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.TruncateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import java.io.Serializable; import java.util.List; @@ -39,8 +39,8 @@ actualDbName + "." 
+ actualTblName, null, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); - Task truncateTableTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); + Task truncateTableTask = TaskFactory.get( + new DDLWork2(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java deleted file mode 100644 index 4514af1..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ /dev/null @@ -1,955 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.PartitionManagementTask; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.txn.TxnUtils; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.exec.DDLTask; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; -import org.apache.hadoop.hive.ql.io.HiveOutputFormat; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.ParseUtils; -import org.apache.hadoop.hive.ql.parse.ReplicationSpec; -import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.Explain.Level; 
-import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.mapred.OutputFormat; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * CreateTableDesc. - * - */ -@Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateTableDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - private static Logger LOG = LoggerFactory.getLogger(CreateTableDesc.class); - String databaseName; - String tableName; - boolean isExternal; - List cols; - List partCols; - List partColNames; - List bucketCols; - List sortCols; - int numBuckets; - String fieldDelim; - String fieldEscape; - String collItemDelim; - String mapKeyDelim; - String lineDelim; - String nullFormat; - String comment; - String inputFormat; - String outputFormat; - String location; - String serName; - String storageHandler; - Map serdeProps; - Map tblProps; - boolean ifNotExists; - List skewedColNames; - List> skewedColValues; - boolean isStoredAsSubDirectories = false; - boolean isTemporary = false; - private boolean isMaterialization = false; - private boolean replaceMode = false; - private ReplicationSpec replicationSpec = null; - private boolean isCTAS = false; - List primaryKeys; - List foreignKeys; - List uniqueConstraints; - List notNullConstraints; - List defaultConstraints; - List checkConstraints; - private ColumnStatistics colStats; - private Long initialMmWriteId; // Initial MM write ID for CTAS and import. - // The FSOP configuration for the FSOP that is going to write initial data during ctas. - // This is not needed beyond compilation, so it is transient. 
- private transient FileSinkDesc writer; - private Long replWriteId; // to be used by repl task to get the txn and valid write id list - private String ownerName = null; - - public CreateTableDesc() { - } - - public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, - List cols, List partCols, - List bucketCols, List sortCols, int numBuckets, - String fieldDelim, String fieldEscape, String collItemDelim, - String mapKeyDelim, String lineDelim, String comment, String inputFormat, - String outputFormat, String location, String serName, - String storageHandler, - Map serdeProps, - Map tblProps, - boolean ifNotExists, List skewedColNames, List> skewedColValues, - List primaryKeys, List foreignKeys, - List uniqueConstraints, List notNullConstraints, - List defaultConstraints, List checkConstraints, - ColumnStatistics colStats) { - - this(tableName, isExternal, isTemporary, cols, partCols, - bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, - collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, - outputFormat, location, serName, storageHandler, serdeProps, - tblProps, ifNotExists, skewedColNames, skewedColValues, - primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); - - this.databaseName = databaseName; - this.colStats = colStats; - } - - public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, - List cols, List partColNames, - List bucketCols, List sortCols, int numBuckets, - String fieldDelim, String fieldEscape, String collItemDelim, - String mapKeyDelim, String lineDelim, String comment, String inputFormat, - String outputFormat, String location, String serName, - String storageHandler, - Map serdeProps, - Map tblProps, - boolean ifNotExists, List skewedColNames, List> skewedColValues, - boolean isCTAS, List primaryKeys, List foreignKeys, - List uniqueConstraints, List notNullConstraints, - List defaultConstraints, List checkConstraints) { - this(databaseName, tableName, isExternal, isTemporary, cols, new ArrayList<>(), - bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, - collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, - outputFormat, location, serName, storageHandler, serdeProps, - tblProps, ifNotExists, skewedColNames, skewedColValues, - primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, - null); - this.partColNames = partColNames; - this.isCTAS = isCTAS; - } - - public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary, - List cols, List partCols, - List bucketCols, List sortCols, int numBuckets, - String fieldDelim, String fieldEscape, String collItemDelim, - String mapKeyDelim, String lineDelim, String comment, String inputFormat, - String outputFormat, String location, String serName, - String storageHandler, - Map serdeProps, - Map tblProps, - boolean ifNotExists, List skewedColNames, List> skewedColValues, - List primaryKeys, List foreignKeys, - List uniqueConstraints, List notNullConstraints, - List defaultConstraints, List checkConstraints) { - this.tableName = tableName; - this.isExternal = isExternal; - this.isTemporary = isTemporary; - this.bucketCols = new ArrayList(bucketCols); - this.sortCols = new ArrayList(sortCols); - this.collItemDelim = collItemDelim; - this.cols = new ArrayList(cols); - this.comment = comment; - this.fieldDelim = fieldDelim; - this.fieldEscape = fieldEscape; - this.inputFormat = inputFormat; - 
this.outputFormat = outputFormat; - this.lineDelim = lineDelim; - this.location = location; - this.mapKeyDelim = mapKeyDelim; - this.numBuckets = numBuckets; - this.partCols = new ArrayList(partCols); - this.serName = serName; - this.storageHandler = storageHandler; - this.serdeProps = serdeProps; - this.tblProps = tblProps; - this.ifNotExists = ifNotExists; - this.skewedColNames = copyList(skewedColNames); - this.skewedColValues = copyList(skewedColValues); - this.primaryKeys = copyList(primaryKeys); - this.foreignKeys = copyList(foreignKeys); - this.uniqueConstraints = copyList(uniqueConstraints); - this.notNullConstraints = copyList(notNullConstraints); - this.defaultConstraints = copyList(defaultConstraints); - this.checkConstraints= copyList(checkConstraints); - } - - private static List copyList(List copy) { - return copy == null ? null : new ArrayList(copy); - } - - @Explain(displayName = "columns") - public List getColsString() { - return Utilities.getFieldSchemaString(getCols()); - } - - @Explain(displayName = "partition columns") - public List getPartColsString() { - return Utilities.getFieldSchemaString(getPartCols()); - } - - @Explain(displayName = "if not exists", displayOnlyOnTrue = true) - public boolean getIfNotExists() { - return ifNotExists; - } - - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } - - @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; - } - - public String getDatabaseName(){ - return databaseName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public List getCols() { - return cols; - } - - public void setCols(ArrayList cols) { - this.cols = cols; - } - - public List getPartCols() { - return partCols; - } - - public void setPartCols(ArrayList partCols) { - this.partCols = partCols; - } - - public List getPartColNames() { - return partColNames; - } - - public void setPartColNames(ArrayList partColNames) { - this.partColNames = partColNames; - } - - public List getPrimaryKeys() { - return primaryKeys; - } - - public void setPrimaryKeys(ArrayList primaryKeys) { - this.primaryKeys = primaryKeys; - } - - public List getForeignKeys() { - return foreignKeys; - } - - public void setForeignKeys(ArrayList foreignKeys) { - this.foreignKeys = foreignKeys; - } - - public List getUniqueConstraints() { - return uniqueConstraints; - } - - public List getNotNullConstraints() { - return notNullConstraints; - } - - public List getDefaultConstraints() { - return defaultConstraints; - } - - public List getCheckConstraints() { return checkConstraints; } - - @Explain(displayName = "bucket columns") - public List getBucketCols() { - return bucketCols; - } - - public void setBucketCols(ArrayList bucketCols) { - this.bucketCols = bucketCols; - } - - @Explain(displayName = "# buckets") - public Integer getNumBucketsExplain() { - if (numBuckets == -1) { - return null; - } else { - return numBuckets; - } - } - - public int getNumBuckets() { - return numBuckets; - } - - public void setNumBuckets(int numBuckets) { - this.numBuckets = numBuckets; - } - - @Explain(displayName = "field delimiter") - public String getFieldDelim() { - return fieldDelim; - } - - public void setFieldDelim(String fieldDelim) { - this.fieldDelim = fieldDelim; - } - - @Explain(displayName = "field escape") - public String getFieldEscape() { - return fieldEscape; - } - - public void setFieldEscape(String fieldEscape) { - this.fieldEscape 
= fieldEscape; - } - - @Explain(displayName = "collection delimiter") - public String getCollItemDelim() { - return collItemDelim; - } - - public void setCollItemDelim(String collItemDelim) { - this.collItemDelim = collItemDelim; - } - - @Explain(displayName = "map key delimiter") - public String getMapKeyDelim() { - return mapKeyDelim; - } - - public void setMapKeyDelim(String mapKeyDelim) { - this.mapKeyDelim = mapKeyDelim; - } - - @Explain(displayName = "line delimiter") - public String getLineDelim() { - return lineDelim; - } - - public void setLineDelim(String lineDelim) { - this.lineDelim = lineDelim; - } - - @Explain(displayName = "comment") - public String getComment() { - return comment; - } - - public void setComment(String comment) { - this.comment = comment; - } - - @Explain(displayName = "input format") - public String getInputFormat() { - return inputFormat; - } - - public void setInputFormat(String inputFormat) { - this.inputFormat = inputFormat; - } - - @Explain(displayName = "output format") - public String getOutputFormat() { - return outputFormat; - } - - public void setOutputFormat(String outputFormat) { - this.outputFormat = outputFormat; - } - - @Explain(displayName = "storage handler") - public String getStorageHandler() { - return storageHandler; - } - - public void setStorageHandler(String storageHandler) { - this.storageHandler = storageHandler; - } - - @Explain(displayName = "location") - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - @Explain(displayName = "isExternal", displayOnlyOnTrue = true) - public boolean isExternal() { - return isExternal; - } - - public void setExternal(boolean isExternal) { - this.isExternal = isExternal; - } - - /** - * @return the sortCols - */ - @Explain(displayName = "sort columns") - public List getSortCols() { - return sortCols; - } - - /** - * @param sortCols - * the sortCols to set - */ - public void setSortCols(ArrayList sortCols) { - this.sortCols = sortCols; - } - - /** - * @return the serDeName - */ - @Explain(displayName = "serde name") - public String getSerName() { - return serName; - } - - /** - * @param serName - * the serName to set - */ - public void setSerName(String serName) { - this.serName = serName; - } - - /** - * @return the serDe properties - */ - @Explain(displayName = "serde properties") - public Map getSerdeProps() { - return serdeProps; - } - - /** - * @param serdeProps - * the serde properties to set - */ - public void setSerdeProps(Map serdeProps) { - this.serdeProps = serdeProps; - } - - /** - * @return the table properties - */ - @Explain(displayName = "table properties") - public Map getTblProps() { - return tblProps; - } - - /** - * @param tblProps - * the table properties to set - */ - public void setTblProps(Map tblProps) { - this.tblProps = tblProps; - } - - /** - * @return the skewedColNames - */ - public List getSkewedColNames() { - return skewedColNames; - } - - /** - * @param skewedColNames the skewedColNames to set - */ - public void setSkewedColNames(ArrayList skewedColNames) { - this.skewedColNames = skewedColNames; - } - - /** - * @return the skewedColValues - */ - public List> getSkewedColValues() { - return skewedColValues; - } - - /** - * @param skewedColValues the skewedColValues to set - */ - public void setSkewedColValues(ArrayList> skewedColValues) { - this.skewedColValues = skewedColValues; - } - - public void validate(HiveConf conf) - throws SemanticException { - - if ((this.getCols() == 
null) || (this.getCols().size() == 0)) { - // for now make sure that serde exists - if (Table.hasMetastoreBasedSchema(conf, serName) && - StringUtils.isEmpty(getStorageHandler())) { - throw new SemanticException(ErrorMsg.INVALID_TBL_DDL_SERDE.getMsg()); - } - return; - } - - if (this.getStorageHandler() == null) { - try { - Class origin = Class.forName(this.getOutputFormat(), true, - Utilities.getSessionSpecifiedClassLoader()); - Class replaced = HiveFileFormatUtils - .getOutputFormatSubstitute(origin); - if (!HiveOutputFormat.class.isAssignableFrom(replaced)) { - throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE - .getMsg()); - } - } catch (ClassNotFoundException e) { - throw new SemanticException(ErrorMsg.CLASSPATH_ERROR.getMsg(), e); - } - } - - List colNames = ParseUtils.validateColumnNameUniqueness(this.getCols()); - - if (this.getBucketCols() != null) { - // all columns in cluster and sort are valid columns - Iterator bucketCols = this.getBucketCols().iterator(); - while (bucketCols.hasNext()) { - String bucketCol = bucketCols.next(); - boolean found = false; - Iterator colNamesIter = colNames.iterator(); - while (colNamesIter.hasNext()) { - String colName = colNamesIter.next(); - if (bucketCol.equalsIgnoreCase(colName)) { - found = true; - break; - } - } - if (!found) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(" \'" + bucketCol + "\'")); - } - } - } - - if (this.getSortCols() != null) { - // all columns in cluster and sort are valid columns - Iterator sortCols = this.getSortCols().iterator(); - while (sortCols.hasNext()) { - String sortCol = sortCols.next().getCol(); - boolean found = false; - Iterator colNamesIter = colNames.iterator(); - while (colNamesIter.hasNext()) { - String colName = colNamesIter.next(); - if (sortCol.equalsIgnoreCase(colName)) { - found = true; - break; - } - } - if (!found) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(" \'" + sortCol + "\'")); - } - } - } - - if (this.getPartCols() != null) { - // there is no overlap between columns and partitioning columns - Iterator partColsIter = this.getPartCols().iterator(); - while (partColsIter.hasNext()) { - FieldSchema fs = partColsIter.next(); - String partCol = fs.getName(); - TypeInfo pti = null; - try { - pti = TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()); - } catch (Exception err) { - LOG.error("Failed to get type info", err); - } - if(null == pti){ - throw new SemanticException(ErrorMsg.PARTITION_COLUMN_NON_PRIMITIVE.getMsg() + " Found " - + partCol + " of type: " + fs.getType()); - } - Iterator colNamesIter = colNames.iterator(); - while (colNamesIter.hasNext()) { - String colName = BaseSemanticAnalyzer.unescapeIdentifier(colNamesIter.next()); - if (partCol.equalsIgnoreCase(colName)) { - throw new SemanticException( - ErrorMsg.COLUMN_REPEATED_IN_PARTITIONING_COLS.getMsg()); - } - } - } - } - - /* Validate skewed information. 
*/ - ValidationUtility.validateSkewedInformation(colNames, this.getSkewedColNames(), - this.getSkewedColValues()); - } - - /** - * @return the isStoredAsSubDirectories - */ - public boolean isStoredAsSubDirectories() { - return isStoredAsSubDirectories; - } - - /** - * @param isStoredAsSubDirectories the isStoredAsSubDirectories to set - */ - public void setStoredAsSubDirectories(boolean isStoredAsSubDirectories) { - this.isStoredAsSubDirectories = isStoredAsSubDirectories; - } - - /** - * @return the nullFormat - */ - public String getNullFormat() { - return nullFormat; - } - - /** - * Set null format string - * @param nullFormat - */ - public void setNullFormat(String nullFormat) { - this.nullFormat = nullFormat; - } - - /** - * @return the isTemporary - */ - @Explain(displayName = "isTemporary", displayOnlyOnTrue = true) - public boolean isTemporary() { - return isTemporary; - } - - /** - * @param isTemporary table is Temporary or not. - */ - public void setTemporary(boolean isTemporary) { - this.isTemporary = isTemporary; - } - - /** - * @return the isMaterialization - */ - @Explain(displayName = "isMaterialization", displayOnlyOnTrue = true) - public boolean isMaterialization() { - return isMaterialization; - } - - /** - * @param isMaterialization table is a materialization or not. - */ - public void setMaterialization(boolean isMaterialization) { - this.isMaterialization = isMaterialization; - } - - /** - * @param replaceMode Determine if this CreateTable should behave like a replace-into alter instead - */ - public void setReplaceMode(boolean replaceMode) { - this.replaceMode = replaceMode; - } - - /** - * @return true if this CreateTable should behave like a replace-into alter instead - */ - public boolean getReplaceMode() { - return replaceMode; - } - - /** - * @param replicationSpec Sets the replication spec governing this create. - * This parameter will have meaningful values only for creates happening as a result of a replication. - */ - public void setReplicationSpec(ReplicationSpec replicationSpec) { - this.replicationSpec = replicationSpec; - } - - /** - * @return what kind of replication scope this drop is running under. - * This can result in a "CREATE/REPLACE IF NEWER THAN" kind of semantic - */ - public ReplicationSpec getReplicationSpec(){ - if (replicationSpec == null){ - this.replicationSpec = new ReplicationSpec(); - } - return this.replicationSpec; - } - - public boolean isCTAS() { - return isCTAS; - } - - public Table toTable(HiveConf conf) throws HiveException { - String databaseName = getDatabaseName(); - String tableName = getTableName(); - - if (databaseName == null || tableName.contains(".")) { - String[] names = Utilities.getDbTableName(tableName); - databaseName = names[0]; - tableName = names[1]; - } - - Table tbl = new Table(databaseName, tableName); - - if (getTblProps() != null) { - tbl.getTTable().getParameters().putAll(getTblProps()); - } - - if (getPartCols() != null) { - tbl.setPartCols(getPartCols()); - } - - if (getNumBuckets() != -1) { - tbl.setNumBuckets(getNumBuckets()); - } - - if (getStorageHandler() != null) { - tbl.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, - getStorageHandler()); - } - HiveStorageHandler storageHandler = tbl.getStorageHandler(); - - /* - * If the user didn't specify a SerDe, we use the default. 
- */ - String serDeClassName; - if (getSerName() == null) { - if (storageHandler == null) { - serDeClassName = PlanUtils.getDefaultSerDe().getName(); - LOG.info("Default to " + serDeClassName + " for table " + tableName); - } else { - serDeClassName = storageHandler.getSerDeClass().getName(); - LOG.info("Use StorageHandler-supplied " + serDeClassName - + " for table " + tableName); - } - } else { - // let's validate that the serde exists - serDeClassName = getSerName(); - DDLTask.validateSerDe(serDeClassName, conf); - } - tbl.setSerializationLib(serDeClassName); - - if (getFieldDelim() != null) { - tbl.setSerdeParam(serdeConstants.FIELD_DELIM, getFieldDelim()); - tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT, getFieldDelim()); - } - if (getFieldEscape() != null) { - tbl.setSerdeParam(serdeConstants.ESCAPE_CHAR, getFieldEscape()); - } - - if (getCollItemDelim() != null) { - tbl.setSerdeParam(serdeConstants.COLLECTION_DELIM, getCollItemDelim()); - } - if (getMapKeyDelim() != null) { - tbl.setSerdeParam(serdeConstants.MAPKEY_DELIM, getMapKeyDelim()); - } - if (getLineDelim() != null) { - tbl.setSerdeParam(serdeConstants.LINE_DELIM, getLineDelim()); - } - if (getNullFormat() != null) { - tbl.setSerdeParam(serdeConstants.SERIALIZATION_NULL_FORMAT, getNullFormat()); - } - if (getSerdeProps() != null) { - Iterator<Map.Entry<String, String>> iter = getSerdeProps().entrySet() - .iterator(); - while (iter.hasNext()) { - Map.Entry<String, String> m = iter.next(); - tbl.setSerdeParam(m.getKey(), m.getValue()); - } - } - - if (getCols() != null) { - tbl.setFields(getCols()); - } - if (getBucketCols() != null) { - tbl.setBucketCols(getBucketCols()); - } - if (getSortCols() != null) { - tbl.setSortCols(getSortCols()); - } - if (getComment() != null) { - tbl.setProperty("comment", getComment()); - } - if (getLocation() != null) { - tbl.setDataLocation(new Path(getLocation())); - } - - if (getSkewedColNames() != null) { - tbl.setSkewedColNames(getSkewedColNames()); - } - if (getSkewedColValues() != null) { - tbl.setSkewedColValues(getSkewedColValues()); - } - - tbl.getTTable().setTemporary(isTemporary()); - - tbl.setStoredAsSubDirectories(isStoredAsSubDirectories()); - - tbl.setInputFormatClass(getInputFormat()); - tbl.setOutputFormatClass(getOutputFormat()); - - // only persist input/output format to metadata when it is explicitly specified. - // Otherwise, load lazily via StorageHandler at query time. - if (getInputFormat() != null && !getInputFormat().isEmpty()) { - tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName()); - } - if (getOutputFormat() != null && !getOutputFormat().isEmpty()) { - tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName()); - } - - if (DDLTask.doesTableNeedLocation(tbl)) { - // If location is specified - ensure that it is a full qualified name - DDLTask.makeLocationQualified(tbl.getDbName(), tbl, conf); - } - - if (isExternal()) { - tbl.setProperty("EXTERNAL", "TRUE"); - tbl.setTableType(TableType.EXTERNAL_TABLE); - // only add if user have not explicit set it (user explicitly disabled for example in which case don't flip it) - if (tbl.getProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY) == null) { - // partition discovery is on by default if undefined - tbl.setProperty(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - } - } - - // If the sorted columns is a superset of bucketed columns, store this fact. - // It can be later used to - // optimize some group-by queries.
Note that, the order does not matter as - // long as it in the first - // 'n' columns where 'n' is the length of the bucketed columns. - if ((tbl.getBucketCols() != null) && (tbl.getSortCols() != null)) { - List bucketCols = tbl.getBucketCols(); - List sortCols = tbl.getSortCols(); - - if ((sortCols.size() > 0) && (sortCols.size() >= bucketCols.size())) { - boolean found = true; - - Iterator iterBucketCols = bucketCols.iterator(); - while (iterBucketCols.hasNext()) { - String bucketCol = iterBucketCols.next(); - boolean colFound = false; - for (int i = 0; i < bucketCols.size(); i++) { - if (bucketCol.equals(sortCols.get(i).getCol())) { - colFound = true; - break; - } - } - if (colFound == false) { - found = false; - break; - } - } - if (found) { - tbl.setProperty("SORTBUCKETCOLSPREFIX", "TRUE"); - } - } - } - - if (colStats != null) { - ColumnStatisticsDesc colStatsDesc = new ColumnStatisticsDesc(colStats.getStatsDesc()); - colStatsDesc.setCatName(tbl.getCatName()); - colStatsDesc.setDbName(getTableName()); - colStatsDesc.setDbName(getDatabaseName()); - tbl.getTTable().setColStats(new ColumnStatistics(colStatsDesc, colStats.getStatsObj())); - } - - // The statistics for non-transactional tables will be obtained from the source. Do not - // reset those on replica. - if (replicationSpec != null && replicationSpec.isInReplicationScope() && - !TxnUtils.isTransactionalTable(tbl.getTTable())) { - // Do nothing to the table statistics. - } else { - if (!this.isCTAS && (tbl.getPath() == null || (tbl.isEmpty() && !isExternal()))) { - if (!tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), - MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); - } - } else { - StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), null, - StatsSetupConst.FALSE); - } - } - - if (ownerName != null) { - tbl.setOwner(ownerName); - } - return tbl; - } - - public void setInitialMmWriteId(Long mmWriteId) { - this.initialMmWriteId = mmWriteId; - } - - public Long getInitialMmWriteId() { - return initialMmWriteId; - } - - - - public FileSinkDesc getAndUnsetWriter() { - FileSinkDesc fsd = writer; - writer = null; - return fsd; - } - - public void setWriter(FileSinkDesc writer) { - this.writer = writer; - } - - public Long getReplWriteId() { - return replWriteId; - } - - public void setReplWriteId(Long replWriteId) { - this.replWriteId = replWriteId; - } - - public String getOwnerName() { - return ownerName; - } - - public void setOwnerName(String ownerName) { - this.ownerName = ownerName; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java deleted file mode 100644 index 2cc0712..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.Map; - -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * CreateTableLikeDesc. - * - */ -@Explain(displayName = "Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class CreateTableLikeDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - String tableName; - boolean isExternal; - String defaultInputFormat; - String defaultOutputFormat; - String defaultSerName; - Map defaultSerdeProps; - String location; - Map tblProps; - boolean ifNotExists; - String likeTableName; - boolean isTemporary = false; - boolean isUserStorageFormat = false; - - public CreateTableLikeDesc() { - } - - public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary, - String defaultInputFormat, String defaultOutputFormat, String location, - String defaultSerName, Map defaultSerdeProps, Map tblProps, - boolean ifNotExists, String likeTableName, boolean isUserStorageFormat) { - this.tableName = tableName; - this.isExternal = isExternal; - this.isTemporary = isTemporary; - this.defaultInputFormat=defaultInputFormat; - this.defaultOutputFormat=defaultOutputFormat; - this.defaultSerName=defaultSerName; - this.defaultSerdeProps=defaultSerdeProps; - this.location = location; - this.tblProps = tblProps; - this.ifNotExists = ifNotExists; - this.likeTableName = likeTableName; - this.isUserStorageFormat = isUserStorageFormat; - } - - @Explain(displayName = "if not exists", displayOnlyOnTrue = true) - public boolean getIfNotExists() { - return ifNotExists; - } - - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } - - @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - @Explain(displayName = "default input format") - public String getDefaultInputFormat() { - return defaultInputFormat; - } - - public void setInputFormat(String inputFormat) { - this.defaultInputFormat = inputFormat; - } - - @Explain(displayName = "default output format") - public String getDefaultOutputFormat() { - return defaultOutputFormat; - } - - public void setOutputFormat(String outputFormat) { - this.defaultOutputFormat = outputFormat; - } - - @Explain(displayName = "location") - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - @Explain(displayName = "isExternal", displayOnlyOnTrue = true) - public boolean isExternal() { - return isExternal; - } - - public void setExternal(boolean isExternal) { - this.isExternal = isExternal; - } - - /** - * @return the default serDeName - */ - @Explain(displayName = "default serde name") - public String getDefaultSerName() { - return defaultSerName; - } - - /** - * @param serName - * the serName to set - */ - public void setDefaultSerName(String serName) { - this.defaultSerName = serName; - } - 
- /** - * @return the default serDe properties - */ - @Explain(displayName = "serde properties") - public Map getDefaultSerdeProps() { - return defaultSerdeProps; - } - - /** - * @param serdeProps - * the default serde properties to set - */ - public void setDefaultSerdeProps(Map serdeProps) { - this.defaultSerdeProps = serdeProps; - } - - @Explain(displayName = "like") - public String getLikeTableName() { - return likeTableName; - } - - public void setLikeTableName(String likeTableName) { - this.likeTableName = likeTableName; - } - - /** - * @return the table properties - */ - @Explain(displayName = "table properties") - public Map getTblProps() { - return tblProps; - } - - /** - * @param tblProps - * the table properties to set - */ - public void setTblProps(Map tblProps) { - this.tblProps = tblProps; - } - - /** - * @return the isTemporary - */ - @Explain(displayName = "isTemporary", displayOnlyOnTrue = true) - public boolean isTemporary() { - return isTemporary; - } - - /** - * @param isTemporary table is Temporary or not. - */ - public void setTemporary(boolean isTemporary) { - this.isTemporary = isTemporary; - } - - /** - * True if user has specified storage format in query - * @return boolean - */ - public boolean isUserStorageFormat() { - return this.isUserStorageFormat; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 6527e52..1685256 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hive.ql.plan; +import org.apache.hadoop.hive.ql.ddl.database.ShowCreateDatabaseDesc; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; -import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc; import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.io.Serializable; @@ -34,19 +34,12 @@ private static final long serialVersionUID = 1L; // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates. 
- private PreInsertTableDesc preInsertTableDesc; private InsertCommitHookDesc insertCommitHookDesc; private AlterMaterializedViewDesc alterMVDesc; - private CreateTableDesc createTblDesc; - private CreateTableLikeDesc createTblLikeDesc; private CreateViewDesc createVwDesc; - private DropTableDesc dropTblDesc; + private DropPartitionDesc dropPartitionDesc; private AlterTableDesc alterTblDesc; - private ShowTablesDesc showTblsDesc; private ShowColumnsDesc showColumnsDesc; - private ShowTblPropertiesDesc showTblPropertiesDesc; - private LockTableDesc lockTblDesc; - private UnlockTableDesc unlockTblDesc; private ShowFunctionsDesc showFuncsDesc; private ShowLocksDesc showLocksDesc; private ShowCompactionsDesc showCompactionsDesc; @@ -54,16 +47,11 @@ private AbortTxnsDesc abortTxnsDesc; private DescFunctionDesc descFunctionDesc; private ShowPartitionsDesc showPartsDesc; - private ShowCreateDatabaseDesc showCreateDbDesc; - private ShowCreateTableDesc showCreateTblDesc; - private DescTableDesc descTblDesc; private AddPartitionDesc addPartitionDesc; private RenamePartitionDesc renamePartitionDesc; private AlterTableSimpleDesc alterTblSimpleDesc; private MsckDesc msckDesc; - private ShowTableStatusDesc showTblStatusDesc; private AlterTableAlterPartDesc alterTableAlterPartDesc; - private TruncateTableDesc truncateTblDesc; private AlterTableExchangePartition alterTableExchangePartition; private KillQueryDesc killQueryDesc; @@ -116,12 +104,6 @@ public DDLWork(HashSet inputs, HashSet outputs) { } public DDLWork(HashSet inputs, HashSet outputs, - TruncateTableDesc truncateTblDesc) { - this(inputs, outputs); - this.truncateTblDesc = truncateTblDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, ShowConfDesc showConfDesc) { this(inputs, outputs); this.showConfDesc = showConfDesc; @@ -148,28 +130,6 @@ public DDLWork(HashSet inputs, HashSet outputs, } /** - * @param createTblDesc - * create table descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - CreateTableDesc createTblDesc) { - this(inputs, outputs); - - this.createTblDesc = createTblDesc; - } - - /** - * @param createTblLikeDesc - * create table like descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - CreateTableLikeDesc createTblLikeDesc) { - this(inputs, outputs); - - this.createTblLikeDesc = createTblLikeDesc; - } - - /** * @param createVwDesc * create view descriptor */ @@ -185,30 +145,10 @@ public DDLWork(HashSet inputs, HashSet outputs, * drop table descriptor */ public DDLWork(HashSet inputs, HashSet outputs, - DropTableDesc dropTblDesc) { - this(inputs, outputs); - - this.dropTblDesc = dropTblDesc; - } - - /** - * @param descTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - DescTableDesc descTblDesc) { - this(inputs, outputs); - - this.descTblDesc = descTblDesc; - } - - /** - * @param showTblsDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowTablesDesc showTblsDesc) { + DropPartitionDesc dropPartitionDesc) { this(inputs, outputs); - this.showTblsDesc = showTblsDesc; + this.dropPartitionDesc = dropPartitionDesc; } /** @@ -222,26 +162,6 @@ public DDLWork(HashSet inputs, HashSet outputs, } /** - * @param lockTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - LockTableDesc lockTblDesc) { - this(inputs, outputs); - - this.lockTblDesc = lockTblDesc; - } - - /** - * @param unlockTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - UnlockTableDesc unlockTblDesc) { - this(inputs, outputs); - - this.unlockTblDesc = unlockTblDesc; - } - - /** * @param 
showFuncsDesc */ public DDLWork(HashSet inputs, HashSet outputs, @@ -300,26 +220,6 @@ public DDLWork(HashSet inputs, HashSet outputs, } /** - * @param showCreateDbDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowCreateDatabaseDesc showCreateDbDesc) { - this(inputs, outputs); - - this.showCreateDbDesc = showCreateDbDesc; - } - - /** - * @param showCreateTblDesc - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowCreateTableDesc showCreateTblDesc) { - this(inputs, outputs); - - this.showCreateTblDesc = showCreateTblDesc; - } - - /** * @param addPartitionDesc * information about the partitions we want to add. */ @@ -360,28 +260,6 @@ public DDLWork(HashSet inputs, HashSet outputs, msckDesc = checkDesc; } - /** - * @param showTblStatusDesc - * show table status descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowTableStatusDesc showTblStatusDesc) { - this(inputs, outputs); - - this.showTblStatusDesc = showTblStatusDesc; - } - - /** - * @param showTblPropertiesDesc - * show table properties descriptor - */ - public DDLWork(HashSet inputs, HashSet outputs, - ShowTblPropertiesDesc showTblPropertiesDesc) { - this(inputs, outputs); - - this.showTblPropertiesDesc = showTblPropertiesDesc; - } - public DDLWork(HashSet inputs, HashSet outputs, RoleDDLDesc roleDDLDesc) { this(inputs, outputs); @@ -444,12 +322,6 @@ public DDLWork(HashSet inputs, HashSet outputs, } public DDLWork(HashSet inputs, HashSet outputs, - PreInsertTableDesc preInsertTableDesc) { - this(inputs, outputs); - this.preInsertTableDesc = preInsertTableDesc; - } - - public DDLWork(HashSet inputs, HashSet outputs, KillQueryDesc killQueryDesc) { this(inputs, outputs); this.killQueryDesc = killQueryDesc; @@ -536,22 +408,6 @@ public DDLWork(HashSet inputs, HashSet outputs, /** * @return the createTblDesc */ - @Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public CreateTableDesc getCreateTblDesc() { - return createTblDesc; - } - - /** - * @return the createTblDesc - */ - @Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public CreateTableLikeDesc getCreateTblLikeDesc() { - return createTblLikeDesc; - } - - /** - * @return the createTblDesc - */ @Explain(displayName = "Create View Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public CreateViewDesc getCreateViewDesc() { return createVwDesc; @@ -560,9 +416,9 @@ public CreateViewDesc getCreateViewDesc() { /** * @return the dropTblDesc */ - @Explain(displayName = "Drop Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public DropTableDesc getDropTblDesc() { - return dropTblDesc; + @Explain(displayName = "Drop Partition Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public DropPartitionDesc getDropPartitionDesc() { + return dropPartitionDesc; } /** @@ -583,14 +439,6 @@ public AlterMaterializedViewDesc getAlterMaterializedViewDesc() { } /** - * @return the showTblsDesc - */ - @Explain(displayName = "Show Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowTablesDesc getShowTblsDesc() { - return showTblsDesc; - } - - /** * @return the showColumnsDesc */ @Explain(displayName = "Show Columns Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -630,22 +478,6 @@ public AbortTxnsDesc getAbortTxnsDesc() { } /** - * @return the lockTblDesc - */ - @Explain(displayName = "Lock Table 
Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public LockTableDesc getLockTblDesc() { - return lockTblDesc; - } - - /** - * @return the unlockTblDesc - */ - @Explain(displayName = "Unlock Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public UnlockTableDesc getUnlockTblDesc() { - return unlockTblDesc; - } - - /** * @return the descFuncDesc */ @Explain(displayName = "Show Function Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -666,28 +498,6 @@ public ShowPartitionsDesc getShowPartsDesc() { return showPartsDesc; } - @Explain(displayName = "Show Create Database Operator", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowCreateDatabaseDesc getShowCreateDbDesc() { - return showCreateDbDesc; - } - - /** - * @return the showCreateTblDesc - */ - @Explain(displayName = "Show Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public ShowCreateTableDesc getShowCreateTblDesc() { - return showCreateTblDesc; - } - - /** - * @return the descTblDesc - */ - @Explain(displayName = "Describe Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public DescTableDesc getDescTblDesc() { - return descTblDesc; - } - /** * @return information about the partitions we want to add. */ @@ -717,17 +527,6 @@ public MsckDesc getMsckDesc() { return msckDesc; } - /** - * @return show table descriptor - */ - public ShowTableStatusDesc getShowTblStatusDesc() { - return showTblStatusDesc; - } - - public ShowTblPropertiesDesc getShowTblPropertiesDesc() { - return showTblPropertiesDesc; - } - public HashSet getInputs() { return inputs; } @@ -787,11 +586,6 @@ public AlterTableAlterPartDesc getAlterTableAlterPartDesc() { return alterTableAlterPartDesc; } - @Explain(displayName = "Truncate Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public TruncateTableDesc getTruncateTblDesc() { - return truncateTblDesc; - } - /** * @return information about the table partition to be exchanged */ @@ -815,11 +609,6 @@ public InsertCommitHookDesc getInsertCommitHookDesc() { return insertCommitHookDesc; } - @Explain(displayName = "Pre Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public PreInsertTableDesc getPreInsertTableDesc() { - return preInsertTableDesc; - } - @Explain(displayName = "Create resource plan") public CreateResourcePlanDesc getCreateResourcePlanDesc() { return createResourcePlanDesc; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java deleted file mode 100644 index ee50232..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.Map; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * DescTableDesc. - * - */ -@Explain(displayName = "Describe Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DescTableDesc extends DDLDesc implements Serializable { - public void setPartSpec(Map partSpec) { - this.partSpec = partSpec; - } - - private static final long serialVersionUID = 1L; - - String tableName; - Map partSpec; - String resFile; - - String colPath; - boolean isExt; - boolean isFormatted; - - /** - * table name for the result of describe table. - */ - private static final String table = "describe"; - /** - * thrift ddl for the result of describe table. - */ - private static final String schema = "col_name,data_type,comment#string:string:string"; - private static final String colStatsSchema = "col_name,data_type,min,max,num_nulls," - + "distinct_count,avg_col_len,max_col_len,num_trues,num_falses,bitVector,comment" - + "#string:string:string:string:string:string:string:string:string:string:string:string"; - - public DescTableDesc() { - } - - /** - * @param partSpec - * @param resFile - * @param tableName - */ - public DescTableDesc(Path resFile, String tableName, - Map partSpec, String colPath) { - this.isExt = false; - this.isFormatted = false; - this.partSpec = partSpec; - this.resFile = resFile.toString(); - this.tableName = tableName; - this.colPath = colPath; - } - - public String getTable() { - return table; - } - - public static String getSchema(boolean colStats) { - if (colStats) { - return colStatsSchema; - } - return schema; - } - - /** - * @return the isExt - */ - public boolean isExt() { - return isExt; - } - - /** - * @param isExt - * the isExt to set - */ - public void setExt(boolean isExt) { - this.isExt = isExt; - } - - /** - * @return the isFormatted - */ - public boolean isFormatted() { - return isFormatted; - } - - /** - * @param isFormat - * the isFormat to set - */ - public void setFormatted(boolean isFormat) { - this.isFormatted = isFormat; - } - - /** - * @return the tableName - */ - @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @param colPath - * the colPath to set - */ - public void setColPath(String colPath) { - this.colPath = colPath; - } - - /** - * @return the columnPath - */ - public String getColumnPath() { - return colPath; - } - - /** - * @return the partSpec - */ - @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public Map getPartSpec() { - return partSpec; - } - - /** - * @param partSpec - * the partSpec to set - */ - public void setPartSpecs(Map partSpec) { - this.partSpec = partSpec; - } - - /** - * @return the resFile - */ - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java 
ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java new file mode 100644 index 0000000..7acb04c --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropPartitionDesc.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +/** + * DropPartitionDesc. + */ +@Explain(displayName = "Drop Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropPartitionDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + public static class PartSpec { + private ExprNodeGenericFuncDesc partSpec; + // TODO: see if we can get rid of this... used in one place to distinguish archived parts + private int prefixLength; + + public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) { + this.partSpec = partSpec; + this.prefixLength = prefixLength; + } + + public ExprNodeGenericFuncDesc getPartSpec() { + return partSpec; + } + + public int getPrefixLength() { + return prefixLength; + } + } + + private final String tableName; + private final ArrayList<PartSpec> partSpecs; + private final boolean ifPurge; + private final ReplicationSpec replicationSpec; + + public DropPartitionDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs, boolean ifPurge, + ReplicationSpec replicationSpec) { + this.tableName = tableName; + this.partSpecs = new ArrayList<PartSpec>(partSpecs.size()); + for (Map.Entry<Integer, List<ExprNodeGenericFuncDesc>> partSpec : partSpecs.entrySet()) { + int prefixLength = partSpec.getKey(); + for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) { + this.partSpecs.add(new PartSpec(expr, prefixLength)); + } + } + this.ifPurge = ifPurge; + this.replicationSpec = replicationSpec == null ? new ReplicationSpec() : replicationSpec; + } + + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTableName() { + return tableName; + } + + public ArrayList<PartSpec> getPartSpecs() { + return partSpecs; + } + + public boolean getIfPurge() { + return ifPurge; + } + + /** + * @return what kind of replication scope this drop is running under.
+ * This can result in a "DROP IF OLDER THAN" kind of semantic + */ + public ReplicationSpec getReplicationSpec(){ + return replicationSpec; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java deleted file mode 100644 index 5d22154..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.ql.parse.ReplicationSpec; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - -/** - * DropTableDesc. - * TODO: this is currently used for both drop table and drop partitions. - */ -@Explain(displayName = "Drop Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class DropTableDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - - public static class PartSpec { - public PartSpec(ExprNodeGenericFuncDesc partSpec, int prefixLength) { - this.partSpec = partSpec; - this.prefixLength = prefixLength; - } - public ExprNodeGenericFuncDesc getPartSpec() { - return partSpec; - } - public int getPrefixLength() { - return prefixLength; - } - private static final long serialVersionUID = 1L; - private ExprNodeGenericFuncDesc partSpec; - // TODO: see if we can get rid of this... 
used in one place to distinguish archived parts - private int prefixLength; - } - - String tableName; - ArrayList<PartSpec> partSpecs; - TableType expectedType; - boolean ifExists; - boolean ifPurge; - ReplicationSpec replicationSpec; - boolean validationRequired; - - - public DropTableDesc() { - } - - /** - * @param tableName - * @param ifPurge - */ - public DropTableDesc( - String tableName, TableType expectedType, boolean ifExists, - boolean ifPurge, ReplicationSpec replicationSpec) { - this(tableName, expectedType, ifExists, ifPurge, replicationSpec, true); - } - - public DropTableDesc( - String tableName, TableType expectedType, boolean ifExists, - boolean ifPurge, ReplicationSpec replicationSpec, boolean validationRequired) { - this.tableName = tableName; - this.partSpecs = null; - this.expectedType = expectedType; - this.ifExists = ifExists; - this.ifPurge = ifPurge; - this.replicationSpec = replicationSpec; - this.validationRequired = validationRequired; - } - - public DropTableDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs, - TableType expectedType, boolean ifPurge, ReplicationSpec replicationSpec) { - this(tableName, partSpecs, expectedType, ifPurge, replicationSpec, true); - } - - public DropTableDesc(String tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs, - TableType expectedType, boolean ifPurge, ReplicationSpec replicationSpec, boolean validationRequired) { - this.tableName = tableName; - this.partSpecs = new ArrayList<PartSpec>(partSpecs.size()); - for (Map.Entry<Integer, List<ExprNodeGenericFuncDesc>> partSpec : partSpecs.entrySet()) { - int prefixLength = partSpec.getKey(); - for (ExprNodeGenericFuncDesc expr : partSpec.getValue()) { - this.partSpecs.add(new PartSpec(expr, prefixLength)); - } - } - this.expectedType = expectedType; - this.ifPurge = ifPurge; - this.replicationSpec = replicationSpec; - this.validationRequired = validationRequired; - } - - /** - * @return the tableName - */ - @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return the partSpecs - */ - public ArrayList<PartSpec> getPartSpecs() { - return partSpecs; - } - - /** - * @return whether to expect a view being dropped - */ - public boolean getExpectView() { - return expectedType != null && expectedType == TableType.VIRTUAL_VIEW; - } - - /** - * @return whether to expect a materialized view being dropped - */ - public boolean getExpectMaterializedView() { - return expectedType != null && expectedType == TableType.MATERIALIZED_VIEW; - } - - /** - * @return whether IF EXISTS was specified - */ - public boolean getIfExists() { - return ifExists; - } - - /** - * @param ifExists - * set whether IF EXISTS was specified - */ - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; - } - - /** - * @return whether Purge was specified - */ - public boolean getIfPurge() { - return ifPurge; - } - - /** - * @param ifPurge - * set whether Purge was specified - */ - public void setIfPurge(boolean ifPurge) { - this.ifPurge = ifPurge; - } - - /** - * @return what kind of replication scope this drop is running under.
- * This can result in a "DROP IF OLDER THAN" kind of semantic - */ - public ReplicationSpec getReplicationSpec(){ - if (replicationSpec == null){ - this.replicationSpec = new ReplicationSpec(); - } - return this.replicationSpec; - } - - /** - * @return whether the table type validation is needed (false in repl case) - */ - public boolean getValidationRequired(){ - return this.validationRequired; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 017e1c7..451c89b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.ql.ddl.DDLWork2; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -323,7 +325,7 @@ public String getDatabaseName() { HiveConf conf) { switch (getDescType()) { case TABLE: - return TaskFactory.get(new DDLWork(inputs, outputs, createTblDesc), conf); + return TaskFactory.get(new DDLWork2(inputs, outputs, createTblDesc), conf); case VIEW: return TaskFactory.get(new DDLWork(inputs, outputs, createViewDesc), conf); } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 46761ff..3abdc48 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -21,6 +21,7 @@ import java.io.Serializable; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java deleted file mode 100644 index 723678e..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.Map; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * LockTableDesc. 
- * - */ -@Explain(displayName = "Lock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class LockTableDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private String tableName; - private String mode; - private Map partSpec; - private String queryId; - private String queryStr; - - public LockTableDesc() { - } - - public LockTableDesc(String tableName, String mode, Map partSpec, String queryId) { - this.tableName = tableName; - this.mode = mode; - this.partSpec = partSpec; - this.queryId = queryId; - } - - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void setMode(String mode) { - this.mode = mode; - } - - public String getMode() { - return mode; - } - - public Map getPartSpec() { - return partSpec; - } - - public void setPartSpec(Map partSpec) { - this.partSpec = partSpec; - } - - public String getQueryId() { - return queryId; - } - - public void setQueryId(String queryId) { - this.queryId = queryId; - } - - public String getQueryStr() { - return queryStr; - } - - public void setQueryStr(String queryStr) { - this.queryStr = queryStr; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 33a5371..b668e40 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.TableScanOperator; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java deleted file mode 100644 index ba5d06e..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * ShowCreateDatabaseDesc. 
- * - */ -@Explain(displayName = "Show Create Database", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowCreateDatabaseDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - String resFile; - String dbName; - - /** - * thrift ddl for the result of showcreatedatabase. - */ - private static final String schema = "createdb_stmt#string"; - - public String getSchema() { - return schema; - } - - /** - * For serialization use only. - */ - public ShowCreateDatabaseDesc() { - } - - /** - * @param resFile - * @param dbName - * name of database to show - */ - public ShowCreateDatabaseDesc(String dbName, String resFile) { - this.dbName = dbName; - this.resFile = resFile; - } - - /** - * @return the resFile - */ - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the databaseName - */ - @Explain(displayName = "database name", - explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getDatabaseName() { - return dbName; - } - - /** - * @param dbName - * the dbName to set - */ - public void setDatabaseName(String dbName) { - this.dbName = dbName; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java deleted file mode 100644 index f96c529..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateTableDesc.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * ShowCreateTableDesc. - * - */ -@Explain(displayName = "Show Create Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowCreateTableDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - String resFile; - String tableName; - - /** - * table name for the result of showcreatetable. - */ - private static final String table = "show_create_table"; - /** - * thrift ddl for the result of showcreatetable. - */ - private static final String schema = "createtab_stmt#string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * For serialization use only. 
- */ - public ShowCreateTableDesc() { - } - - /** - * @param resFile - * @param tableName - * name of table to show - */ - public ShowCreateTableDesc(String tableName, String resFile) { - this.tableName = tableName; - this.resFile = resFile; - } - - /** - * @return the resFile - */ - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFile() { - return resFile; - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the tableName - */ - @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java deleted file mode 100644 index 5022e28..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.HashMap; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * ShowTableStatusDesc. - * - */ -@Explain(displayName = "Show Table Status", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowTableStatusDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - String pattern; - String resFile; - String dbName; - HashMap partSpec; - - /** - * table name for the result of show tables. - */ - private static final String table = "show_tablestatus"; - /** - * thrift ddl for the result of show tables. - */ - private static final String schema = "tab_name#string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * For serializatino use only. 
- */ - public ShowTableStatusDesc() { - } - - /** - * @param pattern - * names of tables to show - */ - public ShowTableStatusDesc(String resFile, String dbName, String pattern) { - this.dbName = dbName; - this.resFile = resFile; - this.pattern = pattern; - } - - /** - * @param resFile - * @param dbName - * data base name - * @param pattern - * names of tables to show - * @param partSpec - * partition specification - */ - public ShowTableStatusDesc(String resFile, String dbName, String pattern, - HashMap partSpec) { - this.dbName = dbName; - this.resFile = resFile; - this.pattern = pattern; - this.partSpec = partSpec; - } - - /** - * @return the pattern - */ - @Explain(displayName = "pattern") - public String getPattern() { - return pattern; - } - - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the resFile - */ - public String getResFile() { - return resFile; - } - - @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) - public String getResFileString() { - return getResFile(); - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(String resFile) { - this.resFile = resFile; - } - - /** - * @return the database name - */ - @Explain(displayName = "database", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getDbName() { - return dbName; - } - - /** - * @param dbName - * the database name - */ - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** - * @return the partSpec - */ - @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public HashMap getPartSpec() { - return partSpec; - } - - /** - * @param partSpec - * the partSpec to set - */ - public void setPartSpec(HashMap partSpec) { - this.partSpec = partSpec; - } -} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java deleted file mode 100644 index 0f7a3cd..0000000 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.ql.plan.Explain.Level; - - -/** - * ShowTablesDesc. - * - */ -@Explain(displayName = "Show Tables", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) -public class ShowTablesDesc extends DDLDesc implements Serializable { - private static final long serialVersionUID = 1L; - - /** - * table name for the result of show tables. 
- */
-  private static final String table = "show";
-
-  /**
-   * thrift ddl for the result of show tables and show views.
-   */
-  private static final String TABLES_VIEWS_SCHEMA = "tab_name#string";
-
-  /**
-   * thrift ddl for the result of show extended tables.
-   */
-  private static final String EXTENDED_TABLES_SCHEMA = "tab_name,table_type#string,string";
-
-  /**
-   * thrift ddl for the result of show tables.
-   */
-  private static final String MATERIALIZED_VIEWS_SCHEMA =
-      "mv_name,rewrite_enabled,mode#string:string:string";
-
-
-  TableType type;
-  String pattern;
-  TableType typeFilter;
-  String dbName;
-  String resFile;
-  boolean isExtended;
-
-  public String getTable() {
-    return table;
-  }
-
-  public String getSchema() {
-    if (type != null && type == TableType.MATERIALIZED_VIEW) {
-      return MATERIALIZED_VIEWS_SCHEMA;
-    }
-    return isExtended ? EXTENDED_TABLES_SCHEMA : TABLES_VIEWS_SCHEMA;
-  }
-
-  public ShowTablesDesc() {
-  }
-
-  /**
-   * @param resFile
-   */
-  public ShowTablesDesc(Path resFile) {
-    this.resFile = resFile.toString();
-    pattern = null;
-  }
-
-  /**
-   * @param dbName
-   *          name of database to show tables of
-   */
-  public ShowTablesDesc(Path resFile, String dbName) {
-    this.resFile = resFile.toString();
-    this.dbName = dbName;
-  }
-
-  /**
-   * @param pattern
-   *          names of tables to show
-   */
-  public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType typeFilter, boolean isExtended) {
-    this.resFile = resFile.toString();
-    this.dbName = dbName;
-    this.pattern = pattern;
-    this.typeFilter = typeFilter;
-    this.isExtended = isExtended;
-  }
-
-  /**
-   * @param type
-   *          type of the tables to show
-   */
-  public ShowTablesDesc(Path resFile, String dbName, String pattern, TableType type) {
-    this.resFile = resFile.toString();
-    this.dbName = dbName;
-    this.pattern = pattern;
-    this.type = type;
-  }
-
-  /**
-   * @return the pattern
-   */
-  @Explain(displayName = "pattern")
-  public String getPattern() {
-    return pattern;
-  }
-
-  /**
-   * @param pattern
-   *          the pattern to set
-   */
-  public void setPattern(String pattern) {
-    this.pattern = pattern;
-  }
-
-  /**
-   * @return the table type to be fetched
-   */
-  @Explain(displayName = "type")
-  public TableType getType() {
-    return type;
-  }
-
-  /**
-   * @param type
-   *          the table type to set
-   */
-  public void setType(TableType type) {
-    this.type = type;
-  }
-
-  /**
-   * @return the resFile
-   */
-  @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
-  public String getResFile() {
-    return resFile;
-  }
-
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  /**
-   * @return the dbName
-   */
-  @Explain(displayName = "database name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getDbName() {
-    return dbName;
-  }
-
-  /**
-   * @param dbName
-   *          the dbName to set
-   */
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  /**
-   * @return is extended
-   */
-  @Explain(displayName = "extended", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, displayOnlyOnTrue = true)
-  public boolean isExtended() {
-    return isExtended;
-  }
-
-  /**
-   * @param isExtended
-   *          whether extended modifier is enabled
-   */
-  public void setIsExtended(boolean isExtended) {
-    this.isExtended = isExtended;
-  }
-
-  /**
-   * @return table type filter, null if it is not filtered
-   */
-  @Explain(displayName = "table type filter", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public TableType getTypeFilter() {
-    return typeFilter;
-  }
-
-  /**
-   * @param typeFilter
-   *          table type filter for show statement
-   */
-  public void setTypeFilter(TableType typeFilter) {
-    this.typeFilter = typeFilter;
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java
deleted file mode 100644
index aac0cf2..0000000
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTblPropertiesDesc.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-import java.util.HashMap;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-
-/**
- * ShowTblPropertiesDesc.
- *
- */
-@Explain(displayName = "Show Table Properties", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ShowTblPropertiesDesc extends DDLDesc implements Serializable {
-  private static final long serialVersionUID = 1L;
-  String resFile;
-  String tableName;
-  String propertyName;
-
-  /**
-   * table name for the result of showtblproperties.
-   */
-  private static final String table = "show_tableproperties";
-  /**
-   * thrift ddl for the result of showtblproperties.
-   */
-  private static final String schema = "prpt_name,prpt_value#string:string";
-
-  public String getTable() {
-    return table;
-  }
-
-  public String getSchema() {
-    return schema;
-  }
-
-  /**
-   * For serialization use only.
-   */
-  public ShowTblPropertiesDesc() {
-  }
-
-  /**
-   * @param resFile
-   * @param tableName
-   *          name of table to show
-   * @param propertyName
-   *          name of property to show
-   */
-  public ShowTblPropertiesDesc(String resFile, String tableName, String propertyName) {
-    this.resFile = resFile;
-    this.tableName = tableName;
-    this.propertyName = propertyName;
-  }
-
-  /**
-   * @return the resFile
-   */
-  public String getResFile() {
-    return resFile;
-  }
-
-  @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
-  public String getResFileString() {
-    return getResFile();
-  }
-
-  /**
-   * @param resFile
-   *          the resFile to set
-   */
-  public void setResFile(String resFile) {
-    this.resFile = resFile;
-  }
-
-  /**
-   * @return the tableName
-   */
-  @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getTableName() {
-    return tableName;
-  }
-
-  /**
-   * @param tableName
-   *          the tableName to set
-   */
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  /**
-   * @return the propertyName
-   */
-  @Explain(displayName = "property name")
-  public String getPropertyName() {
-    return propertyName;
-  }
-
-  /**
-   * @param propertyName
-   *          the propertyName to set
-   */
-  public void setPropertyName(String propertyName) {
-    this.propertyName = propertyName;
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java
deleted file mode 100644
index 61deb24..0000000
--- ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Truncates managed table or partition
- */
-@Explain(displayName = "Truncate Table or Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class TruncateTableDesc extends DDLDesc implements DDLDesc.DDLDescWithWriteId {
-  private final static Logger LOG = LoggerFactory.getLogger(TruncateTableDesc.class);
-
-  private static final long serialVersionUID = 1L;
-
-  private String tableName;
-  private String fullTableName;
-  private Map<String, String> partSpec;
-  private List<Integer> columnIndexes;
-  private Path inputDir;
-  private Path outputDir;
-  private ListBucketingCtx lbCtx;
-  private ReplicationSpec replicationSpec;
-  private long writeId = 0;
-  private boolean isTransactional;
-
-  public TruncateTableDesc() {
-  }
-
-
-  public TruncateTableDesc(String tableName, Map<String, String> partSpec, ReplicationSpec replicationSpec) {
-    this(tableName, partSpec, replicationSpec, null);
-  }
-
-  public TruncateTableDesc(String tableName, Map<String, String> partSpec,
-      ReplicationSpec replicationSpec, Table table) {
-    this.tableName = tableName;
-    this.partSpec = partSpec;
-    this.replicationSpec = replicationSpec;
-    this.isTransactional = AcidUtils.isTransactionalTable(table);
-    this.fullTableName = table == null ? tableName : Warehouse.getQualifiedName(table.getTTable());
-  }
-
-  @Explain(displayName = "TableName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public String getTableName() {
-    return tableName;
-  }
-
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  @Explain(displayName = "Partition Spec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public Map<String, String> getPartSpec() {
-    return partSpec;
-  }
-
-  public void setPartSpec(Map<String, String> partSpec) {
-    this.partSpec = partSpec;
-  }
-
-  @Explain(displayName = "Column Indexes")
-  public List<Integer> getColumnIndexes() {
-    return columnIndexes;
-  }
-
-  public void setColumnIndexes(List<Integer> columnIndexes) {
-    this.columnIndexes = columnIndexes;
-  }
-
-  public Path getInputDir() {
-    return inputDir;
-  }
-
-  public void setInputDir(Path inputDir) {
-    this.inputDir = inputDir;
-  }
-
-  public Path getOutputDir() {
-    return outputDir;
-  }
-
-  public void setOutputDir(Path outputDir) {
-    this.outputDir = outputDir;
-  }
-
-  public ListBucketingCtx getLbCtx() {
-    return lbCtx;
-  }
-
-  public void setLbCtx(ListBucketingCtx lbCtx) {
-    this.lbCtx = lbCtx;
-  }
-
-  /**
-   * @return what kind of replication scope this truncate is running under.
-   *         This can result in a "TRUNCATE IF NEWER THAN" kind of semantic
-   */
-  public ReplicationSpec getReplicationSpec() { return this.replicationSpec; }
-
-  @Override
-  public void setWriteId(long writeId) {
-    this.writeId = writeId;
-  }
-
-  @Override
-  public String getFullTableName() {
-    return fullTableName;
-  }
-
-
-  @Override
-  public boolean mayNeedWriteId() {
-    return isTransactional;
-  }
-
-  public long getWriteId() {
-    return writeId;
-  }
-
-  @Override
-  public String toString() {
-    return this.getClass().getSimpleName() + " for " + getFullTableName();
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java
deleted file mode 100644
index 0b91463..0000000
--- ql/src/java/org/apache/hadoop/hive/ql/plan/UnlockTableDesc.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-
-/**
- * UnlockTableDesc.
- *
- */
-@Explain(displayName = "Unlock Table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class UnlockTableDesc extends DDLDesc implements Serializable {
-  private static final long serialVersionUID = 1L;
-
-  private String tableName;
-  private Map<String, String> partSpec;
-
-  public UnlockTableDesc() {
-  }
-
-  public UnlockTableDesc(String tableName, Map<String, String> partSpec) {
-    this.tableName = tableName;
-    this.partSpec = partSpec;
-  }
-
-  public String getTableName() {
-    return tableName;
-  }
-
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  public Map<String, String> getPartSpec() {
-    return partSpec;
-  }
-
-  public void setPartSpec(Map<String, String> partSpec) {
-    this.partSpec = partSpec;
-  }
-}
diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 198f7fd..e420cb3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.ql.DriverUtils;
+import org.apache.hadoop.hive.ql.ddl.table.ShowCreateTableOperation;
 import org.apache.hadoop.hive.ql.exec.DDLTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
@@ -652,7 +653,7 @@ private String buildMmCompactionCtQuery(
     String sh = t.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE);
     assert sh == null; // Not supposed to be a compactable table.
     if (!serdeParams.isEmpty()) {
-      DDLTask.appendSerdeParams(query, serdeParams);
+      ShowCreateTableOperation.appendSerdeParams(query, serdeParams);
     }
     query.append("STORED AS INPUTFORMAT '").append(
         HiveStringUtils.escapeHiveCommand(sd.getInputFormat())).append("' OUTPUTFORMAT '").append(
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java
index 1ad0225..ae22b7f 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java
@@ -22,9 +22,9 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.QueryPlan;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLTask2;
+import org.apache.hadoop.hive.ql.ddl.DDLWork2;
+import org.apache.hadoop.hive.ql.ddl.table.CreateTableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Assert;
 import org.junit.Test;
@@ -149,9 +149,9 @@ private String getColumnType(String query) {
     }

     QueryPlan plan = driver.getPlan();
-    DDLTask task = (DDLTask) plan.getRootTasks().get(0);
-    DDLWork work = task.getWork();
-    CreateTableDesc spec = work.getCreateTblDesc();
+    DDLTask2 task = (DDLTask2) plan.getRootTasks().get(0);
+    DDLWork2 work = task.getWork();
+    CreateTableDesc spec = (CreateTableDesc)work.getDDLDesc();
     FieldSchema fs = spec.getCols().get(0);
     return fs.getType();
   }
diff --git ql/src/test/queries/clientpositive/db_ddl_explain.q ql/src/test/queries/clientpositive/db_ddl_explain.q
new file mode 100644
index 0000000..7ad0bdd
--- /dev/null
+++ ql/src/test/queries/clientpositive/db_ddl_explain.q
@@ -0,0 +1,20 @@
+EXPLAIN CREATE DATABASE d;
+CREATE DATABASE d;
+
+EXPLAIN SHOW DATABASES;
+SHOW DATABASES;
+
+EXPLAIN DESCRIBE DATABASE d;
+DESCRIBE DATABASE d;
+
+EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis');
+ALTER DATABASE d SET dbproperties('test'='yesthisis');
+
+EXPLAIN SHOW CREATE DATABASE d;
+SHOW CREATE DATABASE d;
+
+EXPLAIN USE d;
+USE d;
+
+EXPLAIN DROP DATABASE d;
+DROP DATABASE d;
diff --git ql/src/test/results/clientpositive/db_ddl_explain.q.out ql/src/test/results/clientpositive/db_ddl_explain.q.out
new file mode 100644
index 0000000..8e85d75
--- /dev/null
+++ ql/src/test/results/clientpositive/db_ddl_explain.q.out
@@ -0,0 +1,171 @@
+PREHOOK: query: EXPLAIN CREATE DATABASE d
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: EXPLAIN CREATE DATABASE d
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Create Database
+      name: d
+
+PREHOOK: query: CREATE DATABASE d
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: CREATE DATABASE d
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:d
+PREHOOK: query: EXPLAIN SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: EXPLAIN SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Databases
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+d
+default
+PREHOOK: query: EXPLAIN DESCRIBE DATABASE d
+PREHOOK: type: DESCDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: EXPLAIN DESCRIBE DATABASE d
+POSTHOOK: type: DESCDATABASE
+POSTHOOK: Input: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Describe Database
+      database: d
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: DESCRIBE DATABASE d
+PREHOOK: type: DESCDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: DESCRIBE DATABASE d
+POSTHOOK: type: DESCDATABASE
+POSTHOOK: Input: database:d
+d	location/in/test	hive_test_user	USER
+PREHOOK: query: EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis')
+PREHOOK: type: ALTERDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis')
+POSTHOOK: type: ALTERDATABASE
+POSTHOOK: Output: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Alter Database
+      name: d
+      properties:
+        test yesthisis
+
+PREHOOK: query: ALTER DATABASE d SET dbproperties('test'='yesthisis')
+PREHOOK: type: ALTERDATABASE
+PREHOOK: Output: database:d
+POSTHOOK: query: ALTER DATABASE d SET dbproperties('test'='yesthisis')
+POSTHOOK: type: ALTERDATABASE
+POSTHOOK: Output: database:d
+PREHOOK: query: EXPLAIN SHOW CREATE DATABASE d
+PREHOOK: type: SHOW_CREATEDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: EXPLAIN SHOW CREATE DATABASE d
+POSTHOOK: type: SHOW_CREATEDATABASE
+POSTHOOK: Input: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Show Create Database
+      database name: d
+
+  Stage: Stage-1
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SHOW CREATE DATABASE d
+PREHOOK: type: SHOW_CREATEDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: SHOW CREATE DATABASE d
+POSTHOOK: type: SHOW_CREATEDATABASE
+POSTHOOK: Input: database:d
+CREATE DATABASE `d`
+LOCATION
+#### A masked pattern was here ####
+WITH DBPROPERTIES (
+  'test'='yesthisis')
+PREHOOK: query: EXPLAIN USE d
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: EXPLAIN USE d
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Switch Database
+      name: d
+
+PREHOOK: query: USE d
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:d
+POSTHOOK: query: USE d
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:d
+PREHOOK: query: EXPLAIN DROP DATABASE d
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:d
+PREHOOK: Output: database:d
+POSTHOOK: query: EXPLAIN DROP DATABASE d
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:d
+POSTHOOK: Output: database:d
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop Database
+      database: d
+      if exists: false
+
+PREHOOK: query: DROP DATABASE d
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:d
+PREHOOK: Output: database:d
+POSTHOOK: query: DROP DATABASE d
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:d
+POSTHOOK: Output: database:d