commit 2e023fa194fad32248dfd6f53fe36360727b31ce Author: David Lavati Date: Fri Apr 26 14:55:15 2019 +0200 HIVE-21198 Introduce a database object reference class Change-Id: Ie48b5320d334530f7ac9744be3ed6af7c7ce8039 diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java index 596eaa9367..d2aecc17e5 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.accumulo.serde.AccumuloIndexParameters; import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe; import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.HiveMetaHook; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; @@ -110,7 +111,7 @@ protected String getTableName(Table table) throws MetaException { if (DEFAULT_PREFIX.equals(table.getDbName())) { return table.getTableName(); } else { - return table.getDbName() + "." + table.getTableName(); + return TableName.getDbTable(table.getDbName(), table.getTableName()); } } diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out index 8a08332d86..dd42769603 100644 --- a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out +++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out @@ -52,9 +52,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.accumulo_table_1 properties: COLUMN_STATS_ACCURATE - table name: default.accumulo_table_1 Stage: Stage-1 Pre-Insert task @@ -533,9 +533,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.accumulo_table_3 properties: COLUMN_STATS_ACCURATE - table name: default.accumulo_table_3 Stage: Stage-1 Pre-Insert task diff --git a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out index a090362704..0f64e5a059 100644 --- a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out +++ b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out @@ -55,9 +55,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.src_x2 properties: COLUMN_STATS_ACCURATE - table name: default.src_x2 Stage: Stage-4 Stats Work diff --git a/beeline/src/java/org/apache/hive/beeline/Rows.java b/beeline/src/java/org/apache/hive/beeline/Rows.java index ce2ede0960..8826bcd079 100644 --- a/beeline/src/java/org/apache/hive/beeline/Rows.java +++ b/beeline/src/java/org/apache/hive/beeline/Rows.java @@ -79,7 +79,7 @@ boolean isPrimaryKey(int col) { if (primaryKeys[col] == null) { try { // this doesn't always work, since some JDBC drivers (e.g., - // Oracle's) return a blank string from getTableName.
String table = rsMeta.getTableName(col + 1); String column = rsMeta.getColumnName(col + 1); diff --git a/hbase-handler/src/test/results/positive/hbase_ddl.q.out b/hbase-handler/src/test/results/positive/hbase_ddl.q.out index db3ef9ee3b..47ea22c74f 100644 --- a/hbase-handler/src/test/results/positive/hbase_ddl.q.out +++ b/hbase-handler/src/test/results/positive/hbase_ddl.q.out @@ -50,9 +50,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.hbase_table_1 properties: COLUMN_STATS_ACCURATE - table name: default.hbase_table_1 Stage: Stage-1 Pre-Insert task diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out index a6c7a78fcb..90ef9fb9a2 100644 --- a/hbase-handler/src/test/results/positive/hbase_queries.q.out +++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out @@ -50,9 +50,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.hbase_table_1 properties: COLUMN_STATS_ACCURATE - table name: default.hbase_table_1 Stage: Stage-1 Pre-Insert task @@ -533,9 +533,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.hbase_table_3 properties: COLUMN_STATS_ACCURATE - table name: default.hbase_table_3 Stage: Stage-1 Pre-Insert task diff --git a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out index de67b9f964..c205440cc6 100644 --- a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out +++ b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out @@ -55,9 +55,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.src_x2 properties: COLUMN_STATS_ACCURATE - table name: default.src_x2 Stage: Stage-4 Stats Work diff --git a/hbase-handler/src/test/results/positive/hbasestats.q.out b/hbase-handler/src/test/results/positive/hbasestats.q.out index 7b1f152e34..514478e1fc 100644 --- a/hbase-handler/src/test/results/positive/hbasestats.q.out +++ b/hbase-handler/src/test/results/positive/hbasestats.q.out @@ -77,9 +77,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.users properties: COLUMN_STATS_ACCURATE - table name: default.users Stage: Stage-1 Pre-Insert task diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java index 540ecd1546..98a44b80d9 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java @@ -19,7 +19,6 @@ package org.apache.hive.hcatalog.cli.SemanticAnalysis; import java.io.IOException; -import java.io.Serializable; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -175,7 +174,7 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, } try { - Table table = context.getHive().newTable(desc.getTableName()); + Table table = context.getHive().newTable(desc.getDbTableName()); if (desc.getLocation() != null) { table.setDataLocation(new Path(desc.getLocation())); } diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index b86a65f7e5..865aae6bca 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -306,7 +306,7 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive // table name. If columns have separate authorization domain, it // must be honored DescTableDesc descTable = (DescTableDesc)ddlDesc; - String tableName = extractTableName(descTable.getTableName()); + String tableName = extractTableName(descTable.getDbTableName()); authorizeTable(cntxt.getHive(), tableName, Privilege.SELECT); } else if (ddlDesc instanceof ShowTableStatusDesc) { ShowTableStatusDesc showTableStatus = (ShowTableStatusDesc)ddlDesc; @@ -336,7 +336,7 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive } else if (ddlDesc instanceof AlterTableSetLocationDesc) { AlterTableSetLocationDesc alterTable = (AlterTableSetLocationDesc)ddlDesc; Table table = hive.getTable(SessionState.get().getCurrentDatabase(), - Utilities.getDbTableName(alterTable.getTableName())[1], false); + Utilities.getDbTableName(alterTable.getDbTableName())[1], false); Partition part = null; if (alterTable.getPartitionSpec() != null) { diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java index 3e18e91423..ecd6632442 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java @@ -403,7 +403,7 @@ public void testCTLPass() throws Exception { query = "create table like_table like junit_sem_analysis"; hcatDriver.run(query); // Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, likeTbl); -// assertEquals(likeTbl,tbl.getTableName()); +// assertEquals(likeTbl,tbl.getDbTableName()); // List cols = tbl.getSd().getCols(); // assertEquals(1, cols.size()); // assertEquals(new FieldSchema("a", "int", null), cols.get(0)); diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java index 7e76e6c7bd..a933758f02 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java @@ -88,7 +88,7 @@ public String getLocation() { * * @return the table name */ - @Deprecated // @deprecated in favour of {@link HCatPartition.#getTableName()}. To be removed in Hive 0.16. + @Deprecated // @deprecated in favour of {@link HCatPartition.#getDbTableName()}. To be removed in Hive 0.16. public String getTableName() { return hcatPartition == null? 
tableName : hcatPartition.getTableName(); } diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java index f97f7d8469..1fc8c908ba 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java @@ -99,7 +99,7 @@ public boolean getIfNotExists() { * * @return the table name */ - @Deprecated // @deprecated in favour of {@link HCatTable.#getTableName()}. To be removed in Hive 0.16. + @Deprecated // @deprecated in favour of {@link HCatTable.#getDbTableName()}. To be removed in Hive 0.16. public String getTableName() { return this.hcatTable.getTableName(); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreUtils.java index 3bb654de53..333db4db66 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreUtils.java @@ -154,7 +154,7 @@ static public Deserializer getDeserializer(Configuration conf, ObjectInspector oi = deserializer.getObjectInspector(); String[] names = tableName.split("\\."); String last_name = names[names.length - 1]; - for (int i = 1; i < names.length; i++) { + for (int i = 2; i < names.length; i++) { if (oi instanceof StructObjectInspector) { StructObjectInspector soi = (StructObjectInspector) oi; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index bae0ffd295..104f29d09e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -100,6 +100,7 @@ import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl; +import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.ParseException; import org.apache.hadoop.hive.ql.parse.ParseUtils; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory; @@ -1074,8 +1075,8 @@ private void acquireLocks() throws CommandProcessorException { fsd1.getDirName().compareTo(fsd2.getDirName())); for (FileSinkDesc desc : acidSinks) { TableDesc tableInfo = desc.getTableInfo(); - long writeId = queryTxnMgr.getTableWriteId(Utilities.getDatabaseName(tableInfo.getTableName()), - Utilities.getTableName(tableInfo.getTableName())); + final TableName tn = HiveTableName.ofNullable(tableInfo.getTableName()); + long writeId = queryTxnMgr.getTableWriteId(tn.getDb(), tn.getTable()); desc.setTableWriteId(writeId); /** @@ -1105,8 +1106,8 @@ private void acquireLocks() throws CommandProcessorException { boolean hasAcidDdl = acidDdlDesc != null && acidDdlDesc.mayNeedWriteId(); if (hasAcidDdl) { String fqTableName = acidDdlDesc.getFullTableName(); - long writeId = queryTxnMgr.getTableWriteId( - Utilities.getDatabaseName(fqTableName), Utilities.getTableName(fqTableName)); + final TableName tn = HiveTableName.ofNullableWithNoDefault(fqTableName); + long writeId = queryTxnMgr.getTableWriteId(tn.getDb(), tn.getTable()); acidDdlDesc.setWriteId(writeId); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java index ebe2df4fc2..9e9d30f246 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableDesc.java @@ -21,9 +21,9 @@ import java.io.Serializable; import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.Explain; @@ -36,7 +36,7 @@ private static final long serialVersionUID = 1L; private final AlterTableType type; - private final String tableName; + private final TableName tableName; private final Map partitionSpec; private final ReplicationSpec replicationSpec; private final boolean isCascade; @@ -45,11 +45,11 @@ private Long writeId; - public AbstractAlterTableDesc(AlterTableType type, String tableName, Map partitionSpec, + public AbstractAlterTableDesc(AlterTableType type, TableName tableName, Map partitionSpec, ReplicationSpec replicationSpec, boolean isCascade, boolean expectView, Map props) throws SemanticException { this.type = type; - this.tableName = tableName.contains(".") ? tableName : String.join(".", Utilities.getDbTableName(tableName)); + this.tableName = tableName; this.partitionSpec = partitionSpec; this.replicationSpec = replicationSpec; this.isCascade = isCascade; @@ -62,8 +62,8 @@ public AlterTableType getType() { } @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; + public String getDbTableName() { + return tableName.getNotEmptyDbTable(); } @Explain(displayName = "partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -96,7 +96,7 @@ public EnvironmentContext getEnvironmentContext() { @Override public String getFullTableName() { - return tableName; + return tableName.getNotEmptyDbTable(); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java index e1f9fad454..d5f6976283 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableOperation.java @@ -56,15 +56,15 @@ public AbstractAlterTableOperation(DDLOperationContext context, T desc) { @Override public int execute() throws HiveException { - if (!AlterTableUtils.allowOperationInReplicationScope(context.getDb(), desc.getTableName(), null, + if (!AlterTableUtils.allowOperationInReplicationScope(context.getDb(), desc.getDbTableName(), null, desc.getReplicationSpec())) { // no alter, the table is missing either due to drop/rename which follows the alter. // or the existing table is newer than our update. 
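The AbstractAlterTableDesc hunk above is the template the rest of the patch follows: each DDL descriptor keeps a structured TableName instead of a dot-joined string, and only renders a "db.table" string at the edges (EXPLAIN output, logging, error messages). A minimal sketch of that consumption pattern, using only calls visible in the hunks above; the class and method names below are invented for illustration, and the always-qualified behaviour of getNotEmptyDbTable() is an assumption:

    import org.apache.hadoop.hive.common.TableName;

    final class TableNameUsageSketch {
      // Display form for EXPLAIN plans and log lines, assumed to always carry the database part.
      static String printable(TableName name) {
        return name.getNotEmptyDbTable();
      }

      // Structured form for metastore calls; replaces Utilities.getDbTableName(String) splitting.
      static String db(TableName name) {
        return name.getDb();
      }

      static String table(TableName name) {
        return name.getTable();
      }
    }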
- LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getTableName()); + LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getDbTableName()); return 0; } - Table oldTable = context.getDb().getTable(desc.getTableName()); + Table oldTable = context.getDb().getTable(desc.getDbTableName()); List partitions = getPartitions(oldTable, desc.getPartitionSpec(), context); // Don't change the table object returned by the metastore, as we'll mess with it's caches. @@ -147,7 +147,7 @@ private void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List

partitionSpec, ReplicationSpec replicationSpec, boolean isCascade, boolean expectView, Map props, Constraints constraints) throws SemanticException { super(type, tableName, partitionSpec, replicationSpec, isCascade, expectView, props); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java index 1b798ff376..07507c177d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableAddColumnsDesc.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; @@ -38,7 +39,7 @@ private final List newColumns; - public AlterTableAddColumnsDesc(String tableName, Map partitionSpec, boolean isCascade, + public AlterTableAddColumnsDesc(TableName tableName, Map partitionSpec, boolean isCascade, List newColumns) throws SemanticException { super(AlterTableType.ADDCOLS, tableName, partitionSpec, null, isCascade, false, null); this.newColumns = newColumns; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java index bcf9bad458..16625b5656 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnDesc.java @@ -19,6 +19,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableWithConstraintsDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.ddl.table.constaint.Constraints; @@ -40,7 +41,7 @@ private final boolean first; private final String afterColumn; - public AlterTableChangeColumnDesc(String tableName, Map partitionSpec, boolean isCascade, + public AlterTableChangeColumnDesc(TableName tableName, Map partitionSpec, boolean isCascade, Constraints constraints, String oldColumnName, String newColumnName, String newColumnType, String newColumnComment, boolean first, String afterColumn) throws SemanticException { super(AlterTableType.RENAME_COLUMN, tableName, partitionSpec, null, isCascade, false, null, constraints); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnOperation.java index 11373f51aa..a775a611da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableChangeColumnOperation.java @@ -52,7 +52,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti boolean isOrcSchemaEvolution = sd.getInputFormat().equals(OrcInputFormat.class.getName()) && AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()); if (isOrcSchemaEvolution && (desc.isFirst() || StringUtils.isNotBlank(desc.getAfterColumn()))) { - throw new HiveException(ErrorMsg.CANNOT_REORDER_COLUMNS, desc.getTableName()); + throw new HiveException(ErrorMsg.CANNOT_REORDER_COLUMNS, desc.getDbTableName()); } FieldSchema 
column = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java index 3a7ef9ecb8..3600084614 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsDesc.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; @@ -37,7 +38,7 @@ private final List newColumns; - public AlterTableReplaceColumnsDesc(String tableName, Map partitionSpec, boolean isCascade, + public AlterTableReplaceColumnsDesc(TableName tableName, Map partitionSpec, boolean isCascade, List newColumns) throws SemanticException { super(AlterTableType.REPLACE_COLUMNS, tableName, partitionSpec, null, isCascade, false, null); this.newColumns = newColumns; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java index 7ab0973d6a..599de64e1b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableReplaceColumnsOperation.java @@ -60,7 +60,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti context.getConsole().printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); } else if (!VALID_SERIALIZATION_LIBS.contains(serializationLib)) { - throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, desc.getTableName()); + throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, desc.getDbTableName()); } // adding columns and limited integer type promotion is not supported for ORC schema evolution @@ -71,7 +71,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti List replaceCols = desc.getNewColumns(); if (replaceCols.size() < existingCols.size()) { - throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, desc.getTableName()); + throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, desc.getDbTableName()); } } @@ -79,7 +79,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti if (ParquetHiveSerDe.isParquetTable(table) && AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()) && !desc.isCascade() && droppingColumns && table.isPartitioned()) { LOG.warn("Cannot drop columns from a partitioned parquet table without the CASCADE option"); - throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, desc.getTableName()); + throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, desc.getDbTableName()); } sd.setCols(desc.getNewColumns()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java index e263064ea2..edd3045237 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/column/AlterTableUpdateColumnsDesc.java @@ -20,6 
+20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -33,7 +34,7 @@ public class AlterTableUpdateColumnsDesc extends AbstractAlterTableDesc { private static final long serialVersionUID = 1L; - public AlterTableUpdateColumnsDesc(String tableName, Map partitionSpec, boolean isCascade) + public AlterTableUpdateColumnsDesc(TableName tableName, Map partitionSpec, boolean isCascade) throws SemanticException { super(AlterTableType.UPDATE_COLUMNS, tableName, partitionSpec, null, isCascade, false, null); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java index bce2194e69..4241a4ba9a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintDesc.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.ddl.table.constaint; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableWithConstraintsDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; @@ -32,7 +33,7 @@ public class AlterTableAddConstraintDesc extends AbstractAlterTableWithConstraintsDesc { private static final long serialVersionUID = 1L; - public AlterTableAddConstraintDesc(String tableName, ReplicationSpec replicationSpec, Constraints constraints) + public AlterTableAddConstraintDesc(TableName tableName, ReplicationSpec replicationSpec, Constraints constraints) throws SemanticException { super(AlterTableType.ADD_CONSTRAINT, tableName, null, replicationSpec, false, false, null, constraints); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintOperation.java index ddb88a0a0d..dfb130ad54 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableAddConstraintOperation.java @@ -38,11 +38,11 @@ public AlterTableAddConstraintOperation(DDLOperationContext context, AlterTableA @Override public int execute() throws Exception { - if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), desc.getTableName(), null, + if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), desc.getDbTableName(), null, desc.getReplicationSpec())) { // no alter, the table is missing either due to drop/rename which follows the alter. // or the existing table is newer than our update. 
- LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getTableName()); + LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getDbTableName()); return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintDesc.java index a7743a5d62..bdb311f178 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintDesc.java @@ -20,8 +20,8 @@ import java.io.Serializable; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.DDLDesc; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.Explain; @@ -34,22 +34,26 @@ public class AlterTableDropConstraintDesc implements DDLDesc, Serializable { private static final long serialVersionUID = 1L; - private final String tableName; + private final TableName tableName; private final ReplicationSpec replicationSpec; private final String constraintName; - public AlterTableDropConstraintDesc(String tableName, ReplicationSpec replicationSpec, String constraintName) + public AlterTableDropConstraintDesc(TableName tableName, ReplicationSpec replicationSpec, String constraintName) throws SemanticException { - this.tableName = String.join(".", Utilities.getDbTableName(tableName)); + this.tableName = tableName; this.replicationSpec = replicationSpec; this.constraintName = constraintName; } - @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { + public TableName getTableName() { return tableName; } + @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getDbTableName() { + return tableName.getNotEmptyDbTable(); + } + public ReplicationSpec getReplicationSpec() { return replicationSpec; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintOperation.java index 57b1807117..53b98425b2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constaint/AlterTableDropConstraintOperation.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.DDLUtils; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; /** @@ -35,17 +34,17 @@ public AlterTableDropConstraintOperation(DDLOperationContext context, AlterTable @Override public int execute() throws Exception { - if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), desc.getTableName(), null, + if (!DDLUtils.allowOperationInReplicationScope(context.getDb(), desc.getDbTableName(), null, desc.getReplicationSpec())) { // no alter, the table is missing either due to drop/rename which follows the alter. // or the existing table is newer than our update. 
- LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getTableName()); + LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", desc.getDbTableName()); return 0; } try { - context.getDb().dropConstraint(Utilities.getDatabaseName(desc.getTableName()), - Utilities.getTableName(desc.getTableName()), desc.getConstraintName()); + context.getDb().dropConstraint(desc.getTableName().getDb(), + desc.getTableName().getTable(), desc.getConstraintName()); } catch (NoSuchObjectException e) { throw new HiveException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java index aeb0fbfa1c..fe559bf92d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableDesc.java @@ -27,6 +27,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.PartitionManagementTask; import org.apache.hadoop.hive.metastore.TableType; @@ -74,8 +75,7 @@ private static final long serialVersionUID = 1L; private static final Logger LOG = LoggerFactory.getLogger(CreateTableDesc.class); - String databaseName; - String tableName; + TableName tableName; boolean isExternal; List cols; List partCols; @@ -123,7 +123,7 @@ public CreateTableDesc() { } - public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, + public CreateTableDesc(TableName tableName, boolean isExternal, boolean isTemporary, List cols, List partCols, List bucketCols, List sortCols, int numBuckets, String fieldDelim, String fieldEscape, String collItemDelim, @@ -145,12 +145,11 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal tblProps, ifNotExists, skewedColNames, skewedColValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); - this.databaseName = databaseName; this.colStats = colStats; this.replWriteId = writeId; } - public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, + public CreateTableDesc(TableName tableName, boolean isExternal, boolean isTemporary, List cols, List partColNames, List bucketCols, List sortCols, int numBuckets, String fieldDelim, String fieldEscape, String collItemDelim, @@ -163,7 +162,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal boolean isCTAS, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) { - this(databaseName, tableName, isExternal, isTemporary, cols, new ArrayList<>(), + this(tableName, isExternal, isTemporary, cols, new ArrayList<>(), bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, outputFormat, location, serName, storageHandler, serdeProps, @@ -174,7 +173,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal this.isCTAS = isCTAS; } - public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary, + public CreateTableDesc(TableName tableName, boolean isExternal, boolean isTemporary, List cols, List partCols, List bucketCols, List 
sortCols, int numBuckets, String fieldDelim, String fieldEscape, String collItemDelim, @@ -243,15 +242,17 @@ public void setIfNotExists(boolean ifNotExists) { } @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { - return tableName; + public String getDbTableName() { + return tableName.getNotEmptyDbTable(); } - public String getDatabaseName() { - return databaseName; + public TableName getTableName(){ return tableName; } + + public String getDatabaseName(){ + return tableName.getDb(); } - public void setTableName(String tableName) { + public void setTableName(TableName tableName) { this.tableName = tableName; } @@ -723,16 +724,8 @@ public boolean isCTAS() { } public Table toTable(HiveConf conf) throws HiveException { - String databaseName = getDatabaseName(); - String tableName = getTableName(); - - if (databaseName == null || tableName.contains(".")) { - String[] names = Utilities.getDbTableName(tableName); - databaseName = names[0]; - tableName = names[1]; - } - Table tbl = new Table(databaseName, tableName); + Table tbl = new Table(tableName.getDb(), tableName.getTable()); if (getTblProps() != null) { tbl.getTTable().getParameters().putAll(getTblProps()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableOperation.java index b6b7d1b2fd..91795f72d3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/creation/CreateTableOperation.java @@ -76,7 +76,7 @@ public int execute() throws HiveException { replDataLocationChanged = true; } } else { - LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", desc.getTableName()); + LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", desc.getDbTableName()); return 0; // no replacement, the existing table state is newer than our update. 
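With CreateTableDesc holding a TableName, the separate databaseName field and the conditional re-parsing in toTable() above both disappear; building the metadata object reduces to reading the two parts of the reference. A sketch of that reduction, assuming org.apache.hadoop.hive.ql.metadata.Table is the Table constructed in the hunk above (the wrapper class and method here are invented):

    import org.apache.hadoop.hive.common.TableName;
    import org.apache.hadoop.hive.ql.metadata.Table;

    final class ToTableSketch {
      // Before: if (databaseName == null || tableName.contains(".")) { split the string on "." ... }
      // After: the reference is already structured, so no parsing branch is needed.
      static Table toTable(TableName name) {
        return new Table(name.getDb(), name.getTable());
      }
    }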
} } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java index 1ef0ac5f34..0fb14e64c5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableDesc.java @@ -23,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -44,13 +45,13 @@ ); private final String resFile; - private final String tableName; + private final TableName tableName; private final Map partitionSpec; private final String columnPath; private final boolean isExtended; private final boolean isFormatted; - public DescTableDesc(Path resFile, String tableName, Map partitionSpec, String columnPath, + public DescTableDesc(Path resFile, TableName tableName, Map partitionSpec, String columnPath, boolean isExtended, boolean isFormatted) { this.resFile = resFile.toString(); this.tableName = tableName; @@ -66,7 +67,11 @@ public String getResFile() { } @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) - public String getTableName() { + public String getDbTableName() { + return tableName.getNotEmptyDbTable(); + } + + public TableName getTableName() { return tableName; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java index 04d0aa1707..5178fb5fb5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/DescTableOperation.java @@ -77,9 +77,10 @@ public DescTableOperation(DDLOperationContext context, DescTableDesc desc) { public int execute() throws Exception { Table table = getTable(); Partition part = getPartition(table); + final String dbTableName = desc.getDbTableName(); try (DataOutputStream outStream = DDLUtils.getOutputStream(new Path(desc.getResFile()), context)) { - LOG.debug("DDLTask: got data for {}", desc.getTableName()); + LOG.debug("DDLTask: got data for {}", dbTableName); List cols = new ArrayList<>(); List colStats = new ArrayList<>(); @@ -102,22 +103,22 @@ public int execute() throws Exception { // In case the query is served by HiveServer2, don't pad it with spaces, // as HiveServer2 output is consumed by JDBC/ODBC clients. 
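DescTableDesc above now carries both accessors: getTableName() hands the structured TableName to the operation, while getDbTableName() supplies the always-qualified display string for EXPLAIN and error messages. The DescTableOperation hunk that continues below also switches the DESCRIBE column path from table_name.column_name to db_name.table_name.column_name, which is why the split index moves from 1 to 2. A small worked example of that change, with invented values:

    final class ColPathExample {
      public static void main(String[] args) {
        String colPath = "default.src.key";        // db_name.table_name.column_name
        String colName = colPath.split("\\.")[2];  // "key"; index 1 would now yield the table name "src"
        System.out.println(colName);
      }
    }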
boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); - context.getFormatter().describeTable(outStream, desc.getColumnPath(), desc.getTableName(), table, part, cols, + context.getFormatter().describeTable(outStream, desc.getColumnPath(), dbTableName, table, part, cols, desc.isFormatted(), desc.isExtended(), isOutputPadded, colStats); - LOG.debug("DDLTask: written data for {}", desc.getTableName()); + LOG.debug("DDLTask: written data for {}", dbTableName); } catch (SQLException e) { - throw new HiveException(e, ErrorMsg.GENERIC_ERROR, desc.getTableName()); + throw new HiveException(e, ErrorMsg.GENERIC_ERROR, dbTableName); } return 0; } private Table getTable() throws HiveException { - Table table = context.getDb().getTable(desc.getTableName(), false); + Table table = context.getDb().getTable(desc.getTableName().getDb(), desc.getTableName().getTable(), false); if (table == null) { - throw new HiveException(ErrorMsg.INVALID_TABLE, desc.getTableName()); + throw new HiveException(ErrorMsg.INVALID_TABLE, desc.getDbTableName()); } return table; } @@ -128,7 +129,7 @@ private Partition getPartition(Table table) throws HiveException { part = context.getDb().getPartition(table, desc.getPartitionSpec(), false); if (part == null) { throw new HiveException(ErrorMsg.INVALID_PARTITION, - StringUtils.join(desc.getPartitionSpec().keySet(), ','), desc.getTableName()); + StringUtils.join(desc.getPartitionSpec().keySet(), ','), desc.getDbTableName()); } } return part; @@ -191,11 +192,11 @@ private void getColumnsNoColumnPath(Table table, Partition partition, List cols, List colStats, Deserializer deserializer) throws SemanticException, HiveException, MetaException { - // when column name is specified in describe table DDL, colPath will be table_name.column_name - String colName = desc.getColumnPath().split("\\.")[1]; + // when column name is specified in describe table DDL, colPath will be db_name.table_name.column_name + String colName = desc.getColumnPath().split("\\.")[2]; List colNames = Lists.newArrayList(colName.toLowerCase()); - String[] dbTab = Utilities.getDbTableName(desc.getTableName()); + String[] dbTab = Utilities.getDbTableName(desc.getDbTableName()); if (null == part) { if (table.isPartitioned()) { Map tableProps = table.getParameters() == null ? 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java index db94fd992b..9d3d0b9c7e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/ShowTablePropertiesDesc.java @@ -20,6 +20,7 @@ import java.io.Serializable; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -34,10 +35,10 @@ public static final String SCHEMA = "prpt_name,prpt_value#string:string"; private final String resFile; - private final String tableName; + private final TableName tableName; private final String propertyName; - public ShowTablePropertiesDesc(String resFile, String tableName, String propertyName) { + public ShowTablePropertiesDesc(String resFile, TableName tableName, String propertyName) { this.resFile = resFile; this.tableName = tableName; this.propertyName = propertyName; @@ -54,7 +55,7 @@ public String getResFileString() { @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { - return tableName; + return tableName.getNotEmptyDbTable(); } @Explain(displayName = "property name") diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java index 4b748e980b..091c146940 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameDesc.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.ddl.table.misc; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; @@ -34,7 +35,7 @@ private final String newName; - public AlterTableRenameDesc(String tableName, ReplicationSpec replicationSpec, boolean expectView, String newName) + public AlterTableRenameDesc(TableName tableName, ReplicationSpec replicationSpec, boolean expectView, String newName) throws SemanticException { super(AlterTableType.RENAME, tableName, null, replicationSpec, false, expectView, null); this.newName = newName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java index 0b19b5d1c9..73ea400dcc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableRenameOperation.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; /** @@ -36,7 +37,7 @@ public AlterTableRenameOperation(DDLOperationContext context, AlterTableRenameDe @Override public int execute() throws HiveException { - String[] names = Utilities.getDbTableName(desc.getTableName()); + String[] names = Utilities.getDbTableName(desc.getDbTableName()); if (Utils.isBootstrapDumpInProgress(context.getDb(), 
names[0])) { LOG.error("DDLTask: Rename Table not allowed as bootstrap dump in progress"); throw new HiveException("Rename Table: Not allowed as bootstrap dump in progress"); @@ -47,7 +48,6 @@ public int execute() throws HiveException { @Override protected void doAlteration(Table table, Partition partition) throws HiveException { - table.setDbName(Utilities.getDatabaseName(desc.getNewName())); - table.setTableName(Utilities.getTableName(desc.getNewName())); + HiveTableName.setFrom(desc.getNewName(), table); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java index 1ba54d42b7..1fb11ce7c3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetOwnerDesc.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.ddl.table.misc; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; @@ -34,7 +35,7 @@ private final PrincipalDesc ownerPrincipal; - public AlterTableSetOwnerDesc(String tableName, PrincipalDesc ownerPrincipal) throws SemanticException { + public AlterTableSetOwnerDesc(TableName tableName, PrincipalDesc ownerPrincipal) throws SemanticException { super(AlterTableType.OWNER, tableName, null, null, false, false, null); this.ownerPrincipal = ownerPrincipal; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java index b84ee5a85c..2d615a64cc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableSetPropertiesDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; @@ -40,7 +41,7 @@ private final boolean isFullAcidConversion; private final EnvironmentContext environmentContext; - public AlterTableSetPropertiesDesc(String tableName, Map partitionSpec, + public AlterTableSetPropertiesDesc(TableName tableName, Map partitionSpec, ReplicationSpec replicationSpec, boolean expectView, Map props, boolean isExplicitStatsUpdate, boolean isFullAcidConversion, EnvironmentContext environmentContext) throws SemanticException { super(AlterTableType.ADDPROPS, tableName, partitionSpec, replicationSpec, false, expectView, props); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java index 2ab2043c1f..bea9a365e9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/AlterTableUnsetPropertiesDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; @@ -38,7 +39,7 
@@ private final boolean isExplicitStatsUpdate; private final EnvironmentContext environmentContext; - public AlterTableUnsetPropertiesDesc(String tableName, Map partitionSpec, + public AlterTableUnsetPropertiesDesc(TableName tableName, Map partitionSpec, ReplicationSpec replicationSpec, boolean expectView, Map props, boolean isExplicitStatsUpdate, EnvironmentContext environmentContext) throws SemanticException { super(AlterTableType.DROPPROPS, tableName, partitionSpec, replicationSpec, false, expectView, props); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java index cf271fc0c1..04fa1b9901 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/TruncateTableDesc.java @@ -39,8 +39,7 @@ public class TruncateTableDesc implements DDLDescWithWriteId, Serializable { private static final long serialVersionUID = 1L; - private final String tableName; - private final String fullTableName; + private final TableName tableName; private final Map partSpec; private final ReplicationSpec replicationSpec; private final boolean isTransactional; @@ -51,19 +50,18 @@ private long writeId = 0; - public TruncateTableDesc(String tableName, Map partSpec, ReplicationSpec replicationSpec) { + public TruncateTableDesc(TableName tableName, Map partSpec, ReplicationSpec replicationSpec) { this(tableName, partSpec, replicationSpec, null); } - public TruncateTableDesc(String tableName, Map partSpec, ReplicationSpec replicationSpec, + public TruncateTableDesc(TableName tableName, Map partSpec, ReplicationSpec replicationSpec, Table table) { this(tableName, partSpec, replicationSpec, table, null, null, null, null); } - public TruncateTableDesc(String tableName, Map partSpec, ReplicationSpec replicationSpec, + public TruncateTableDesc(TableName tableName, Map partSpec, ReplicationSpec replicationSpec, Table table, List columnIndexes, Path inputDir, Path outputDir, ListBucketingCtx lbCtx) { this.tableName = tableName; - this.fullTableName = table == null ? 
tableName : TableName.getDbTable(table.getDbName(), table.getTableName()); this.partSpec = partSpec; this.replicationSpec = replicationSpec; this.isTransactional = AcidUtils.isTransactionalTable(table); @@ -75,12 +73,12 @@ public TruncateTableDesc(String tableName, Map partSpec, Replica @Explain(displayName = "table name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { - return tableName; + return tableName.getNotEmptyDbTable(); } @Override public String getFullTableName() { - return fullTableName; + return tableName.getNotEmptyDbTable(); } @Explain(displayName = "partition spec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java index 990326493e..f5dc34200c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableDropPartitionDesc.java @@ -24,6 +24,7 @@ import java.util.Map; import org.apache.hadoop.hive.ql.ddl.DDLDesc; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; @@ -60,12 +61,12 @@ public int getPrefixLength() { } } - private final String tableName; + private final TableName tableName; private final ArrayList partSpecs; private final boolean ifPurge; private final ReplicationSpec replicationSpec; - public AlterTableDropPartitionDesc(String tableName, Map> partSpecs, + public AlterTableDropPartitionDesc(TableName tableName, Map> partSpecs, boolean ifPurge, ReplicationSpec replicationSpec) { this.tableName = tableName; this.partSpecs = new ArrayList(partSpecs.size()); @@ -81,7 +82,7 @@ public AlterTableDropPartitionDesc(String tableName, Map getPartSpecs() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java index 46d3193e33..f7e38c3a29 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/AlterTableRenamePartitionDesc.java @@ -22,6 +22,7 @@ import java.util.LinkedHashMap; import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; @@ -35,26 +36,24 @@ public class AlterTableRenamePartitionDesc implements DDLDescWithWriteId, Serializable { private static final long serialVersionUID = 1L; - private final String tableName; + private final TableName tableName; private final Map oldPartSpec; private final Map newPartSpec; private final ReplicationSpec replicationSpec; - private final String fqTableName; private long writeId; - public AlterTableRenamePartitionDesc(String tableName, Map oldPartSpec, + public AlterTableRenamePartitionDesc(TableName tableName, Map oldPartSpec, Map newPartSpec, ReplicationSpec replicationSpec, Table table) { this.tableName = tableName; this.oldPartSpec = new LinkedHashMap(oldPartSpec); this.newPartSpec = new LinkedHashMap(newPartSpec); this.replicationSpec = replicationSpec; - 
this.fqTableName = table != null ? (table.getDbName() + "." + table.getTableName()) : tableName; } @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { - return tableName; + return tableName.getNotEmptyDbTable(); } @Explain(displayName = "old partitions", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -87,7 +86,7 @@ public long getWriteId() { @Override public String getFullTableName() { - return fqTableName; + return tableName.getNotEmptyDbTable(); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java index 5b7f7df67f..a9a4724ea7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableClusteredByDesc.java @@ -22,6 +22,7 @@ import java.util.Map; import java.util.stream.Collectors; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; @@ -41,7 +42,7 @@ private final List bucketColumns; private final List sortColumns; - public AlterTableClusteredByDesc(String tableName, Map partitionSpec, int numberBuckets, + public AlterTableClusteredByDesc(TableName tableName, Map partitionSpec, int numberBuckets, List bucketColumns, List sortColumns) throws SemanticException { super(AlterTableType.CLUSTERED_BY, tableName, partitionSpec, null, false, false, null); this.numberBuckets = numberBuckets; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableCompactDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableCompactDesc.java index b32585bccd..3a512ba6e4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableCompactDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableCompactDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -36,9 +37,9 @@ private final boolean isBlocking; private final Map properties; - public AlterTableCompactDesc(String tableName, Map partitionSpec, String compactionType, + public AlterTableCompactDesc(TableName tableName, Map partitionSpec, String compactionType, boolean isBlocking, Map properties) { - this.tableName = tableName; + this.tableName = tableName.getNotEmptyDbTable(); this.partitionSpec = partitionSpec; this.compactionType = compactionType; this.isBlocking = isBlocking; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableConcatenateDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableConcatenateDesc.java index 281fcbffa5..5f5bbe4a0d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableConcatenateDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableConcatenateDesc.java @@ -21,6 +21,7 @@ import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; @@ -42,9 +43,9 @@ 
private final Class inputFormatClass; private final TableDesc tableDesc; - public AlterTableConcatenateDesc(String tableName, Map partitionSpec, ListBucketingCtx lbCtx, + public AlterTableConcatenateDesc(TableName tableName, Map partitionSpec, ListBucketingCtx lbCtx, Path inputDir, Path outputDir, Class inputFormatClass, TableDesc tableDesc) { - this.tableName = tableName; + this.tableName = tableName.getNotEmptyDbTable(); this.partitionSpec = partitionSpec; this.lbCtx = lbCtx; this.inputDir = inputDir; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java index 76b1800571..c8d1a599db 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableIntoBucketsDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -35,7 +36,7 @@ private final int numberOfBuckets; - public AlterTableIntoBucketsDesc(String tableName, Map partitionSpec, int numberOfBuckets) + public AlterTableIntoBucketsDesc(TableName tableName, Map partitionSpec, int numberOfBuckets) throws SemanticException { super(AlterTableType.INTO_BUCKETS, tableName, partitionSpec, null, false, false, null); this.numberOfBuckets = numberOfBuckets; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java index e96e54ce14..37005f658d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotClusteredDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -33,7 +34,7 @@ public class AlterTableNotClusteredDesc extends AbstractAlterTableDesc { private static final long serialVersionUID = 1L; - public AlterTableNotClusteredDesc(String tableName, Map partitionSpec) throws SemanticException { + public AlterTableNotClusteredDesc(TableName tableName, Map partitionSpec) throws SemanticException { super(AlterTableType.NOT_CLUSTERED, tableName, partitionSpec, null, false, false, null); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java index 8276c824e0..016c18c60e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSkewedDesc.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.ddl.table.storage; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -31,7 +32,7 @@ public class AlterTableNotSkewedDesc extends AbstractAlterTableDesc { private static final long serialVersionUID = 
1L; - public AlterTableNotSkewedDesc(String tableName) throws SemanticException { + public AlterTableNotSkewedDesc(TableName tableName) throws SemanticException { super(AlterTableType.NOT_SKEWED, tableName, null, null, false, false, null); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java index 9d5f3b9025..30614f2dce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableNotSortedDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -33,7 +34,7 @@ public class AlterTableNotSortedDesc extends AbstractAlterTableDesc { private static final long serialVersionUID = 1L; - public AlterTableNotSortedDesc(String tableName, Map partitionSpec) throws SemanticException { + public AlterTableNotSortedDesc(TableName tableName, Map partitionSpec) throws SemanticException { super(AlterTableType.NOT_SORTED, tableName, partitionSpec, null, false, false, null); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java index f9ac33ec46..78ac94b3b5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -37,7 +38,7 @@ private final String outputFormat; private final String serdeName; - public AlterTableSetFileFormatDesc(String tableName, Map partitionSpec, String inputFormat, + public AlterTableSetFileFormatDesc(TableName tableName, Map partitionSpec, String inputFormat, String outputFormat, String serdeName) throws SemanticException { super(AlterTableType.SET_FILE_FORMAT, tableName, partitionSpec, null, false, false, null); this.inputFormat = inputFormat; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatOperation.java index 2f3d31a8ed..5b7c5acbe4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetFileFormatOperation.java @@ -43,7 +43,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti if (AlterTableUtils.isSchemaEvolutionEnabled(table, context.getConf()) && sd.getInputFormat().equals(OrcInputFormat.class.getName()) && !desc.getInputFormat().equals(OrcInputFormat.class.getName())) { - throw new HiveException(ErrorMsg.CANNOT_CHANGE_FILEFORMAT, "ORC", desc.getTableName()); + throw new HiveException(ErrorMsg.CANNOT_CHANGE_FILEFORMAT, "ORC", desc.getDbTableName()); } sd.setInputFormat(desc.getInputFormat()); diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java index fc25df0959..d79a8e4751 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetLocationDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -35,7 +36,7 @@ private final String location; - public AlterTableSetLocationDesc(String tableName, Map partitionSpec, String location) + public AlterTableSetLocationDesc(TableName tableName, Map partitionSpec, String location) throws SemanticException { super(AlterTableType.ALTERLOCATION, tableName, partitionSpec, null, false, false, null); this.location = location; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java index ec82da084b..6038cd7b34 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -35,7 +36,7 @@ private final String serdeName; - public AlterTableSetSerdeDesc(String tableName, Map partitionSpec, Map props, + public AlterTableSetSerdeDesc(TableName tableName, Map partitionSpec, Map props, String serdeName) throws SemanticException { super(AlterTableType.SET_SERDE, tableName, partitionSpec, null, false, false, props); this.serdeName = serdeName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeOperation.java index faa872478e..a447b3186e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdeOperation.java @@ -52,7 +52,7 @@ protected void doAlteration(Table table, Partition partition) throws HiveExcepti oldSerdeName.equalsIgnoreCase(OrcSerde.class.getName()) && !serdeName.equalsIgnoreCase(OrcSerde.class.getName())) { throw new HiveException(ErrorMsg.CANNOT_CHANGE_SERDE, OrcSerde.class.getSimpleName(), - desc.getTableName()); + desc.getDbTableName()); } sd.getSerdeInfo().setSerializationLib(serdeName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java index f35b4c1774..fdbdcf5573 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSerdePropsDesc.java @@ -20,6 +20,7 @@ import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import 
org.apache.hadoop.hive.ql.parse.SemanticException; @@ -33,7 +34,7 @@ public class AlterTableSetSerdePropsDesc extends AbstractAlterTableDesc { private static final long serialVersionUID = 1L; - public AlterTableSetSerdePropsDesc(String tableName, Map partitionSpec, Map props) + public AlterTableSetSerdePropsDesc(TableName tableName, Map partitionSpec, Map props) throws SemanticException { super(AlterTableType.SET_SERDE_PROPS, tableName, partitionSpec, null, false, false, props); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java index 2512e27b7e..4cae3b0251 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSetSkewedLocationDesc.java @@ -22,6 +22,7 @@ import java.util.Map; import java.util.stream.Collectors; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -37,7 +38,7 @@ private final Map, String> skewedLocations; - public AlterTableSetSkewedLocationDesc(String tableName, Map partitionSpec, + public AlterTableSetSkewedLocationDesc(TableName tableName, Map partitionSpec, Map, String> skewedLocations) throws SemanticException { super(AlterTableType.SET_SKEWED_LOCATION, tableName, partitionSpec, null, false, false, null); this.skewedLocations = skewedLocations; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java index a39921984a..656aaa6ea9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/AlterTableSkewedByDesc.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.stream.Collectors; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -38,8 +39,8 @@ private final List> skewedColumnValues; private final boolean isStoredAsDirectories; - public AlterTableSkewedByDesc(String tableName, List skewedColumnNames, List> skewedColumnValues, - boolean isStoredAsDirectories) throws SemanticException { + public AlterTableSkewedByDesc(TableName tableName, List skewedColumnNames, + List> skewedColumnValues, boolean isStoredAsDirectories) throws SemanticException { super(AlterTableType.SKEWED_BY, tableName, null, null, false, false, null); this.skewedColumnNames = skewedColumnNames; this.skewedColumnValues = skewedColumnValues; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java index 58eacde5eb..a9efc1d679 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java @@ -18,6 +18,7 @@ package 
org.apache.hadoop.hive.ql.ddl.view.materialized.alter.rebuild; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.LockState; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -53,26 +54,26 @@ public void analyzeInternal(ASTNode root) throws SemanticException { return; } - String[] qualifiedTableName = getQualifiedTableName((ASTNode)root.getChild(0)); - String dbDotTable = getDotName(qualifiedTableName); - ASTNode rewrittenAST = getRewrittenAST(qualifiedTableName, dbDotTable); + TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0)); + ASTNode rewrittenAST = getRewrittenAST(tableName); mvRebuildMode = MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD; - mvRebuildDbName = qualifiedTableName[0]; - mvRebuildName = qualifiedTableName[1]; + mvRebuildDbName = tableName.getDb(); + mvRebuildName = tableName.getTable(); - LOG.debug("Rebuilding materialized view " + dbDotTable); + LOG.debug("Rebuilding materialized view " + tableName.getNotEmptyDbTable()); super.analyzeInternal(rewrittenAST); } - private static final String REWRITTEN_INSERT_STATEMENT = "INSERT OVERWRITE TABLE `%s`.`%s` %s"; + private static final String REWRITTEN_INSERT_STATEMENT = "INSERT OVERWRITE TABLE %s %s"; - private ASTNode getRewrittenAST(String[] qualifiedTableName, String dbDotTable) throws SemanticException { + private ASTNode getRewrittenAST(TableName tableName) throws SemanticException { ASTNode rewrittenAST; // We need to go lookup the table and get the select statement and then parse it. try { - Table table = getTableObjectByName(dbDotTable, true); + Table table = getTableObjectByName(tableName.getNotEmptyDbTable(), true); if (!table.isMaterializedView()) { + // Cannot rebuild not materialized view throw new SemanticException(ErrorMsg.REBUILD_NO_MATERIALIZED_VIEW); } @@ -84,8 +85,8 @@ private ASTNode getRewrittenAST(String[] qualifiedTableName, String dbDotTable) } Context ctx = new Context(queryState.getConf()); - String rewrittenInsertStatement = String.format(REWRITTEN_INSERT_STATEMENT, qualifiedTableName[0], - qualifiedTableName[1], viewText); + String rewrittenInsertStatement = String.format(REWRITTEN_INSERT_STATEMENT, + tableName.getEscapedNotEmptyDbTable(), viewText); rewrittenAST = ParseUtils.parse(rewrittenInsertStatement, ctx); this.ctx.addRewrittenStatementContext(ctx); @@ -96,12 +97,13 @@ private ASTNode getRewrittenAST(String[] qualifiedTableName, String dbDotTable) LockState state; try { state = txnManager.acquireMaterializationRebuildLock( - qualifiedTableName[0], qualifiedTableName[1], txnManager.getCurrentTxnId()).getState(); + tableName.getDb(), tableName.getTable(), txnManager.getCurrentTxnId()).getState(); } catch (LockException e) { throw new SemanticException("Exception acquiring lock for rebuilding the materialized view", e); } if (state != LockState.ACQUIRED) { - throw new SemanticException("Another process is rebuilding the materialized view " + dbDotTable); + throw new SemanticException( + "Another process is rebuilding the materialized view " + tableName.getNotEmptyDbTable()); } } } catch (Exception e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteAnalyzer.java index 19447ea5d4..5a8ccfda0e 100644 --- 
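For illustration only (this sketch is not part of the patch): the rebuild analyzer above now formats the rewritten INSERT with a single escaped, possibly db-qualified token instead of two separately back-quoted parts. The view name, view text, and the exact escaped rendering are assumptions.

import org.apache.hadoop.hive.common.TableName;

public class RebuildStatementSketch {
  // Same template string as the hunk above.
  private static final String REWRITTEN_INSERT_STATEMENT = "INSERT OVERWRITE TABLE %s %s";

  public static String rewritten(TableName viewName, String viewText) {
    // getEscapedNotEmptyDbTable() is expected to yield something like `db`.`view`.
    return String.format(REWRITTEN_INSERT_STATEMENT,
        viewName.getEscapedNotEmptyDbTable(), viewText);
  }
}
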
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteAnalyzer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.ddl.view.materialized.alter.rewrite; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId; @@ -47,8 +48,7 @@ public AlterMaterializedViewRewriteAnalyzer(QueryState queryState) throws Semant @Override public void analyzeInternal(ASTNode root) throws SemanticException { - String[] qualified = getQualifiedTableName((ASTNode)root.getChild(0)); - String fqMaterializedViewName = getDotName(qualified); + TableName tableName = getQualifiedTableName((ASTNode)root.getChild(0)); // Value for the flag boolean rewriteEnable; @@ -64,13 +64,13 @@ public void analyzeInternal(ASTNode root) throws SemanticException { } // It can be fully qualified name or use default database - Table materializedViewTable = getTable(fqMaterializedViewName, true); + Table materializedViewTable = getTable(tableName, true); // One last test: if we are enabling the rewrite, we need to check that query // only uses transactional (MM and ACID) tables if (rewriteEnable) { - for (String tableName : materializedViewTable.getCreationMetadata().getTablesUsed()) { - Table table = getTable(tableName, true); + for (String tName : materializedViewTable.getCreationMetadata().getTablesUsed()) { + Table table = getTable(tName, true); if (!AcidUtils.isTransactionalTable(table)) { throw new SemanticException("Automatic rewriting for materialized view cannot be enabled if the " + "materialized view uses non-transactional tables"); @@ -78,7 +78,8 @@ public void analyzeInternal(ASTNode root) throws SemanticException { } } - AlterMaterializedViewRewriteDesc desc = new AlterMaterializedViewRewriteDesc(fqMaterializedViewName, rewriteEnable); + AlterMaterializedViewRewriteDesc desc = + new AlterMaterializedViewRewriteDesc(tableName.getNotEmptyDbTable(), rewriteEnable); if (AcidUtils.isTransactionalTable(materializedViewTable)) { ddlDescWithWriteId = desc; } @@ -92,7 +93,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException { // Create task to update rewrite flag as dependant of previous one MaterializedViewUpdateDesc materializedViewUpdateDesc = - new MaterializedViewUpdateDesc(fqMaterializedViewName, rewriteEnable, !rewriteEnable, false); + new MaterializedViewUpdateDesc(tableName.getNotEmptyDbTable(), rewriteEnable, !rewriteEnable, false); DDLWork updateDdlWork = new DDLWork(getInputs(), getOutputs(), materializedViewUpdateDesc); targetTask.addDependentTask(TaskFactory.get(updateDdlWork, conf)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index a7770b4e53..3deba27dd6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -101,6 +101,7 @@ import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -2130,11 +2131,22 @@ public static 
String formatBinaryString(byte[] array, int start, int length) { * @param dbtable * @return String array with two elements, first is db name, second is table name * @throws SemanticException + * @deprecated use {@link TableName} or {@link org.apache.hadoop.hive.ql.parse.HiveTableName} instead */ + @Deprecated public static String[] getDbTableName(String dbtable) throws SemanticException { return getDbTableName(SessionState.get().getCurrentDatabase(), dbtable); } + /** + * Extract db and table name from dbtable string. + * @param defaultDb + * @param dbtable + * @return String array with two elements, first is db name, second is table name + * @throws SemanticException + * @deprecated use {@link TableName} or {@link org.apache.hadoop.hive.ql.parse.HiveTableName} instead + */ + @Deprecated public static String[] getDbTableName(String defaultDb, String dbtable) throws SemanticException { if (dbtable == null) { return new String[2]; @@ -2150,36 +2162,6 @@ public static String formatBinaryString(byte[] array, int start, int length) { } } - /** - * Accepts qualified name which is in the form of dbname.tablename and returns dbname from it - * - * @param dbTableName - * @return dbname - * @throws SemanticException input string is not qualified name - */ - public static String getDatabaseName(String dbTableName) throws SemanticException { - String[] split = dbTableName.split("\\."); - if (split.length != 2) { - throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, dbTableName); - } - return split[0]; - } - - /** - * Accepts qualified name which is in the form of dbname.tablename and returns tablename from it - * - * @param dbTableName - * @return tablename - * @throws SemanticException input string is not qualified name - */ - public static String getTableName(String dbTableName) throws SemanticException { - String[] split = dbTableName.split("\\."); - if (split.length != 2) { - throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, dbTableName); - } - return split[1]; - } - public static void validateColumnNames(List colNames, List checkCols) throws SemanticException { Iterator checkColsIter = checkCols.iterator(); @@ -2200,6 +2182,44 @@ public static void validateColumnNames(List colNames, List check } } + /** + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName}. All parts can be null. + * + * @param dbTableName + * @return a {@link TableName} + * @throws SemanticException + * @deprecated handle null values and use {@link TableName#fromString(String, String, String)} + */ + @Deprecated + public static TableName getNullableTableName(String dbTableName) throws SemanticException { + return getNullableTableName(dbTableName, SessionState.get().getCurrentDatabase()); + } + + /** + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName}. All parts can be null. 
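A minimal usage sketch (not part of the patch) of the parsing behaviour described above, assuming a default database of "default"; the example inputs and the expected dot-joined outputs are illustrative.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class TableNameParseSketch {
  public static TableName parse(String dbTableName, String defaultDb) throws SemanticException {
    try {
      // "db.table" keeps both parts; a bare "table" picks up defaultDb; the catalog is left unset here.
      return TableName.fromString(dbTableName, null, defaultDb);
    } catch (IllegalArgumentException e) {
      // Same wrapping as getNullableTableName above.
      throw new SemanticException(e.getCause());
    }
  }

  public static void main(String[] args) throws SemanticException {
    System.out.println(parse("sales.orders", "default").getNotEmptyDbTable()); // expected: sales.orders
    System.out.println(parse("orders", "default").getNotEmptyDbTable());       // expected: default.orders
  }
}
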
+ * + * @param dbTableName + * @param defaultDb + * @return a {@link TableName} + * @throws SemanticException + * @deprecated handle null values and use {@link TableName#fromString(String, String, String)} + */ + @Deprecated + public static TableName getNullableTableName(String dbTableName, String defaultDb) throws SemanticException { + if (dbTableName == null) { + return new TableName(null, null, null); + } else { + try { + return TableName + .fromString(dbTableName, SessionState.get().getCurrentCatalog(), defaultDb); + } catch (IllegalArgumentException e) { + throw new SemanticException(e.getCause()); + } + } + } + /** * Gets the default notification interval to send progress updates to the tracker. Useful for * operators that may not output data for a while. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java index cd4f2a02a3..9a2ca76a43 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java @@ -417,7 +417,7 @@ public int execute(DriverContext driverContext) { return 5; } - rj = jc.submitJob(job); + rj = jc.submitJob(job); if (driverContext.isShutdown()) { LOG.warn("Task was cancelled"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index e45116115f..df64ea9c60 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; @@ -349,7 +350,7 @@ private Path locationOnReplicaWarehouse(Table table, AlterTableAddPartitionDesc. 
Map> partSpecsExpr = ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec)); if (partSpecsExpr.size() > 0) { - AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(table.getFullyQualifiedName(), + AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(HiveTableName.of(table), partSpecsExpr, true, event.replicationSpec()); dropPtnTask = TaskFactory.get( new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index ed75df88f0..750386b853 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -20,6 +20,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; @@ -50,7 +51,6 @@ import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.ReplTxnWork; -import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.slf4j.Logger; import java.io.Serializable; @@ -251,8 +251,8 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa HashMap mapProp = new HashMap<>(); mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replState); - String fqTableName = StatsUtils.getFullyQualifiedTableName(dbName, tableName); - AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(fqTableName, partSpec, + TableName tName = TableName.fromString(tableName, null, dbName); + AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(tName, partSpec, new ReplicationSpec(replState, replState), false, mapProp, false, false, null); Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index 98a0fa6fba..a5af560504 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.repl.ReplConst; import org.apache.hadoop.hive.common.repl.ReplScope; import org.apache.hadoop.hive.conf.HiveConf; @@ -44,7 +45,6 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; -import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -149,8 +149,8 @@ HashMap mapProp = new HashMap<>(); mapProp.put(REPL_CHECKPOINT_KEY, dumpRoot); - String fqTableName = 
StatsUtils.getFullyQualifiedTableName(tableDesc.getDatabaseName(), tableDesc.getTableName()); - AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(fqTableName, partSpec, null, false, + final TableName tName = TableName.fromString(tableDesc.getTableName(), null, tableDesc.getDatabaseName()); + AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(tName, partSpec, null, false, mapProp, false, false, null); return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc), conf); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 67996c6db9..5b6f747516 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -2340,7 +2340,7 @@ public static ValidWriteIdList getTableValidWriteIdListWithTxnList( } public static String getFullTableName(String dbName, String tableName) { - return TableName.getDbTable(dbName.toLowerCase(), tableName.toLowerCase()); + return TableName.fromString(tableName, null, dbName).getNotEmptyDbTable().toLowerCase(); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 6143e85664..cc4f87b4bf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1085,7 +1085,7 @@ public void createTable(Table tbl, boolean ifNotExists, List checkConstraints) throws HiveException { try { - if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) { + if (org.apache.commons.lang3.StringUtils.isBlank(tbl.getDbName())) { tbl.setDbName(SessionState.get().getCurrentDatabase()); } if (tbl.getCols().size() == 0 || tbl.getSd().getColsSize() == 0) { @@ -1331,6 +1331,21 @@ public Table getTable(final String dbName, final String tableName) throws HiveEx } } + /** + * Returns metadata of the table + * + * @param tableName + * the tableName object + * @return the table + * @exception HiveException + * if there's an internal error or if the table doesn't exist + */ + public Table getTable(TableName tableName) throws HiveException { + + return tableName.getDb() == null ? this.getTable(tableName.getTable(), true) : this + .getTable(tableName.getDb(), tableName.getTable(), true); + } + /** * Returns metadata of the table * @@ -1349,7 +1364,7 @@ public Table getTable(final String dbName, final String tableName, } /** - * Returns metadata of the table + * Returns metadata of the table. 
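As a rough sketch (not part of the patch) of the two lookups changed above: AcidUtils.getFullTableName now routes through TableName before lower-casing, and the new Hive.getTable(TableName) overload falls back to the session database when the db part is null. Hive.get(conf) and the example names are assumptions.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public class TableLookupSketch {
  public static Table lookup(HiveConf conf) throws HiveException {
    // Dot-joined, lower-cased key as produced by the rewritten AcidUtils helper.
    String acidKey = AcidUtils.getFullTableName("Sales", "Orders"); // expected: "sales.orders"
    System.out.println(acidKey);

    // The new overload: a null db would dispatch to the single-argument lookup instead.
    TableName name = TableName.fromString("orders", null, "sales");
    return Hive.get(conf).getTable(name);
  }
}
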
* * @param dbName * the name of the database diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java index 424027077a..8257177466 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java @@ -28,6 +28,7 @@ import org.antlr.runtime.tree.Tree; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -97,6 +98,7 @@ public static boolean isAcidExport(ASTNode tree) throws SemanticException { //tableHandle can be null if table doesn't exist return tableHandle != null && AcidUtils.isFullAcidTable(tableHandle); } + private static String getTmptTableNameForExport(Table exportTable) { String tmpTableDb = exportTable.getDbName(); String tmpTableName = exportTable.getTableName() + "_" + UUID.randomUUID().toString().replace('-', '_'); @@ -123,7 +125,8 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException { //need to create the table "manually" rather than creating a task since it has to exist to // compile the insert into T... - String newTableName = getTmptTableNameForExport(exportTable); //this is db.table + final String newTableName = getTmptTableNameForExport(exportTable); //this is db.table + final TableName newTableNameRef = HiveTableName.of(newTableName); Map tblProps = new HashMap<>(); tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.FALSE.toString()); String location; @@ -189,7 +192,7 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException { // IMPORT is done for this archive and target table doesn't exist, it will be created as Acid. 
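A small sketch (not part of the patch), assuming HiveTableName.of accepts a dot-qualified string as its call site above suggests; the temp-table suffix stands in for the UUID-based name the export analyzer generates.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class ExportTmpNameSketch {
  public static TableName tmpTableRef(String db, String exportedTable) throws SemanticException {
    String dbDotTable = db + "." + exportedTable + "_export_tmp"; // stand-in for the UUID suffix
    return HiveTableName.of(dbDotTable);
  }
}
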
Map mapProps = new HashMap<>(); mapProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString()); - AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(newTableName, null, null, false, + AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(newTableNameRef, null, null, false, mapProps, false, false, null); addExportTask(rootTasks, exportTask, TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 538fa10a27..009b593eb4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -38,11 +38,11 @@ import org.antlr.runtime.TokenRewriteStream; import org.antlr.runtime.tree.Tree; -import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.type.Date; import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; @@ -394,15 +394,10 @@ public static String getUnescapedName(ASTNode tableOrColumnNode, String currentD if (tokenType == HiveParser.TOK_TABNAME) { // table node Map.Entry dbTablePair = getDbTableNamePair(tableOrColumnNode); - String dbName = dbTablePair.getKey(); - String tableName = dbTablePair.getValue(); - if (dbName != null){ - return dbName + "." + tableName; - } - if (currentDatabase != null) { - return currentDatabase + "." + tableName; - } - return tableName; + return TableName.fromString(dbTablePair.getValue(), + null, + dbTablePair.getKey() == null ? currentDatabase : dbTablePair.getKey()) + .getNotEmptyDbTable(); } else if (tokenType == HiveParser.StringLiteral) { return unescapeSQLString(tableOrColumnNode.getText()); } @@ -410,32 +405,42 @@ public static String getUnescapedName(ASTNode tableOrColumnNode, String currentD return unescapeIdentifier(tableOrColumnNode.getText()); } - public static String[] getQualifiedTableName(ASTNode tabNameNode) throws SemanticException { - if (tabNameNode.getType() != HiveParser.TOK_TABNAME || - (tabNameNode.getChildCount() != 1 && tabNameNode.getChildCount() != 2)) { + /** + * Get the name reference of a DB table node. + * @param tabNameNode + * @return a {@link TableName}, not null. The catalog will be missing from this. + * @throws SemanticException + */ + public static TableName getQualifiedTableName(ASTNode tabNameNode) throws SemanticException { + // Ideally this would be removed, once the catalog is accessible in all use cases + return getQualifiedTableName(tabNameNode, null); + } + + /** + * Get the name reference of a DB table node. + * @param tabNameNode + * @param catalogName the catalog of the DB/object + * @return a {@link TableName}, not null. The catalog will be missing from this. 
+ * @throws SemanticException + */ + public static TableName getQualifiedTableName(ASTNode tabNameNode, String catalogName) throws SemanticException { + if (tabNameNode.getType() != HiveParser.TOK_TABNAME || (tabNameNode.getChildCount() != 1 + && tabNameNode.getChildCount() != 2)) { throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME.getMsg(tabNameNode)); } if (tabNameNode.getChildCount() == 2) { - String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText()); - String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText()); + final String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText()); + final String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText()); if (dbName.contains(".") || tableName.contains(".")) { throw new SemanticException(ErrorMsg.OBJECTNAME_CONTAINS_DOT.getMsg(tabNameNode)); } - return new String[] {dbName, tableName}; + return HiveTableName.ofNullable(tableName, dbName); } - String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText()); + final String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText()); if (tableName.contains(".")) { throw new SemanticException(ErrorMsg.OBJECTNAME_CONTAINS_DOT.getMsg(tabNameNode)); } - return Utilities.getDbTableName(tableName); - } - - public static String getDotName(String[] qname) throws SemanticException { - String genericName = StringUtils.join(qname, "."); - if (qname.length != 2) { - throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, genericName); - } - return genericName; + return HiveTableName.ofNullable(tableName); } /** @@ -707,112 +712,104 @@ private static String spliceString(String str, int i, int length, String replace /** * Process the primary keys from the ast node and populate the SQLPrimaryKey list. */ - protected static void processPrimaryKeys(String databaseName, String tableName, - ASTNode child, List primaryKeys) throws SemanticException { + protected static void processPrimaryKeys(TableName tName, ASTNode child, List primaryKeys) + throws SemanticException { List primaryKeyInfos = new ArrayList(); generateConstraintInfos(child, primaryKeyInfos); - constraintInfosToPrimaryKeys(databaseName, tableName, primaryKeyInfos, primaryKeys); + constraintInfosToPrimaryKeys(tName, primaryKeyInfos, primaryKeys); } - protected static void processPrimaryKeys(String databaseName, String tableName, - ASTNode child, List columnNames, List primaryKeys) - throws SemanticException { + protected static void processPrimaryKeys(TableName tName, ASTNode child, List columnNames, + List primaryKeys) throws SemanticException { List primaryKeyInfos = new ArrayList(); generateConstraintInfos(child, columnNames, primaryKeyInfos, null, null); - constraintInfosToPrimaryKeys(databaseName, tableName, primaryKeyInfos, primaryKeys); + constraintInfosToPrimaryKeys(tName, primaryKeyInfos, primaryKeys); } - private static void constraintInfosToPrimaryKeys(String databaseName, String tableName, - List primaryKeyInfos, List primaryKeys) { + private static void constraintInfosToPrimaryKeys(TableName tName, List primaryKeyInfos, + List primaryKeys) { int i = 1; for (ConstraintInfo primaryKeyInfo : primaryKeyInfos) { - primaryKeys.add(new SQLPrimaryKey(databaseName, tableName, primaryKeyInfo.colName, - i++, primaryKeyInfo.constraintName, primaryKeyInfo.enable, - primaryKeyInfo.validate, primaryKeyInfo.rely)); + primaryKeys.add( + new SQLPrimaryKey(tName.getDb(), tName.getTable(), primaryKeyInfo.colName, i++, primaryKeyInfo.constraintName, + primaryKeyInfo.enable, 
primaryKeyInfo.validate, primaryKeyInfo.rely)); } } /** * Process the unique constraints from the ast node and populate the SQLUniqueConstraint list. */ - protected static void processUniqueConstraints(String catName, String databaseName, String tableName, - ASTNode child, List uniqueConstraints) throws SemanticException { + protected static void processUniqueConstraints(TableName tName, ASTNode child, + List uniqueConstraints) throws SemanticException { List uniqueInfos = new ArrayList(); generateConstraintInfos(child, uniqueInfos); - constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints); + constraintInfosToUniqueConstraints(tName, uniqueInfos, uniqueConstraints); } - protected static void processUniqueConstraints(String catName, String databaseName, String tableName, - ASTNode child, List columnNames, List uniqueConstraints) - throws SemanticException { + protected static void processUniqueConstraints(TableName tName, ASTNode child, List columnNames, + List uniqueConstraints) throws SemanticException { List uniqueInfos = new ArrayList(); generateConstraintInfos(child, columnNames, uniqueInfos, null, null); - constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints); + constraintInfosToUniqueConstraints(tName, uniqueInfos, uniqueConstraints); } - private static void constraintInfosToUniqueConstraints(String catName, String databaseName, String tableName, - List uniqueInfos, List uniqueConstraints) { + private static void constraintInfosToUniqueConstraints(TableName tName, List uniqueInfos, + List uniqueConstraints) { int i = 1; for (ConstraintInfo uniqueInfo : uniqueInfos) { - uniqueConstraints.add(new SQLUniqueConstraint(catName, databaseName, tableName, uniqueInfo.colName, - i++, uniqueInfo.constraintName, uniqueInfo.enable, uniqueInfo.validate, uniqueInfo.rely)); + uniqueConstraints.add( + new SQLUniqueConstraint(tName.getCat(), tName.getDb(), tName.getTable(), uniqueInfo.colName, i++, + uniqueInfo.constraintName, uniqueInfo.enable, uniqueInfo.validate, uniqueInfo.rely)); } } - protected static void processCheckConstraints(String catName, String databaseName, String tableName, - ASTNode child, List columnNames, - List checkConstraints, final ASTNode typeChild, - final TokenRewriteStream tokenRewriteStream) + protected static void processCheckConstraints(TableName tName, ASTNode child, List columnNames, + List checkConstraints, final ASTNode typeChild, final TokenRewriteStream tokenRewriteStream) throws SemanticException { List checkInfos = new ArrayList(); generateConstraintInfos(child, columnNames, checkInfos, typeChild, tokenRewriteStream); - constraintInfosToCheckConstraints(catName, databaseName, tableName, checkInfos, checkConstraints); + constraintInfosToCheckConstraints(tName, checkInfos, checkConstraints); } - private static void constraintInfosToCheckConstraints(String catName, String databaseName, String tableName, - List checkInfos, - List checkConstraints) { + private static void constraintInfosToCheckConstraints(TableName tName, List checkInfos, + List checkConstraints) { for (ConstraintInfo checkInfo : checkInfos) { - checkConstraints.add(new SQLCheckConstraint(catName, databaseName, tableName, checkInfo.colName, - checkInfo.defaultValue, checkInfo.constraintName, checkInfo.enable, - checkInfo.validate, checkInfo.rely)); + checkConstraints.add(new SQLCheckConstraint(tName.getCat(), tName.getDb(), tName.getTable(), checkInfo.colName, + checkInfo.defaultValue, checkInfo.constraintName, 
checkInfo.enable, checkInfo.validate, checkInfo.rely)); } } - protected static void processDefaultConstraints(String catName, String databaseName, String tableName, - ASTNode child, List columnNames, List defaultConstraints, final ASTNode typeChild, - final TokenRewriteStream tokenRewriteStream) - throws SemanticException { + protected static void processDefaultConstraints(TableName tName, ASTNode child, List columnNames, + List defaultConstraints, final ASTNode typeChild, + final TokenRewriteStream tokenRewriteStream) throws SemanticException { List defaultInfos = new ArrayList(); generateConstraintInfos(child, columnNames, defaultInfos, typeChild, tokenRewriteStream); - constraintInfosToDefaultConstraints(catName, databaseName, tableName, defaultInfos, defaultConstraints); + constraintInfosToDefaultConstraints(tName, defaultInfos, defaultConstraints); } - private static void constraintInfosToDefaultConstraints( - String catName, String databaseName, String tableName, - List defaultInfos, List defaultConstraints) { + private static void constraintInfosToDefaultConstraints(TableName tName, List defaultInfos, + List defaultConstraints) { for (ConstraintInfo defaultInfo : defaultInfos) { - defaultConstraints.add(new SQLDefaultConstraint(catName, databaseName, tableName, - defaultInfo.colName, defaultInfo.defaultValue, defaultInfo.constraintName, - defaultInfo.enable, defaultInfo.validate, defaultInfo.rely)); + defaultConstraints.add( + new SQLDefaultConstraint(tName.getCat(), tName.getDb(), tName.getTable(), defaultInfo.colName, + defaultInfo.defaultValue, defaultInfo.constraintName, defaultInfo.enable, defaultInfo.validate, + defaultInfo.rely)); } } - protected static void processNotNullConstraints(String catName, String databaseName, String tableName, - ASTNode child, List columnNames, List notNullConstraints) - throws SemanticException { + protected static void processNotNullConstraints(TableName tName, ASTNode child, List columnNames, + List notNullConstraints) throws SemanticException { List notNullInfos = new ArrayList(); generateConstraintInfos(child, columnNames, notNullInfos, null, null); - constraintInfosToNotNullConstraints(catName, databaseName, tableName, notNullInfos, notNullConstraints); + constraintInfosToNotNullConstraints(tName, notNullInfos, notNullConstraints); } - private static void constraintInfosToNotNullConstraints( - String catName, String databaseName, String tableName, List notNullInfos, + private static void constraintInfosToNotNullConstraints(TableName tName, List notNullInfos, List notNullConstraints) { for (ConstraintInfo notNullInfo : notNullInfos) { - notNullConstraints.add(new SQLNotNullConstraint(catName, databaseName, tableName, - notNullInfo.colName, notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate, - notNullInfo.rely)); + notNullConstraints.add( + new SQLNotNullConstraint(tName.getCat(), tName.getDb(), tName.getTable(), notNullInfo.colName, + notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate, notNullInfo.rely)); } } @@ -1097,12 +1094,13 @@ else if(child.getToken().getType() == HiveParser.TOK_CHECK_CONSTRAINT) { /** * Process the foreign keys from the AST and populate the foreign keys in the SQLForeignKey list + * @param tName catalog/db/table name reference * @param child Foreign Key token node * @param foreignKeys SQLForeignKey list * @throws SemanticException */ - protected static void processForeignKeys(String databaseName, String tableName, - ASTNode child, List foreignKeys) throws SemanticException { + 
protected static void processForeignKeys(TableName tName, ASTNode child, List foreignKeys) + throws SemanticException { // The ANTLR grammar looks like : // 1. KW_CONSTRAINT idfr=identifier KW_FOREIGN KW_KEY fkCols=columnParenthesesList // KW_REFERENCES tabName=tableName parCols=columnParenthesesList @@ -1160,16 +1158,16 @@ protected static void processForeignKeys(String databaseName, String tableName, " The number of foreign key columns should be same as number of parent key columns ")); } - String[] parentDBTbl = getQualifiedTableName((ASTNode) child.getChild(ptIndex)); + final TableName parentTblName = getQualifiedTableName((ASTNode) child.getChild(ptIndex)); for (int j = 0; j < child.getChild(fkIndex).getChildCount(); j++) { SQLForeignKey sqlForeignKey = new SQLForeignKey(); - sqlForeignKey.setFktable_db(databaseName); - sqlForeignKey.setFktable_name(tableName); + sqlForeignKey.setFktable_db(tName.getDb()); + sqlForeignKey.setFktable_name(tName.getTable()); Tree fkgrandChild = child.getChild(fkIndex).getChild(j); checkColumnName(fkgrandChild.getText()); sqlForeignKey.setFkcolumn_name(unescapeIdentifier(fkgrandChild.getText().toLowerCase())); - sqlForeignKey.setPktable_db(parentDBTbl[0]); - sqlForeignKey.setPktable_name(parentDBTbl[1]); + sqlForeignKey.setPktable_db(parentTblName.getDb()); + sqlForeignKey.setPktable_name(parentTblName.getTable()); Tree pkgrandChild = child.getChild(pkIndex).getChild(j); sqlForeignKey.setPkcolumn_name(unescapeIdentifier(pkgrandChild.getText().toLowerCase())); sqlForeignKey.setKey_seq(j+1); @@ -1224,34 +1222,33 @@ private static void checkColumnName(String columnName) throws SemanticException ASTNode child = (ASTNode) ast.getChild(i); switch (child.getToken().getType()) { case HiveParser.TOK_UNIQUE: { - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + final TableName tName = + getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); - processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], child, - uniqueConstraints); + processUniqueConstraints(tName, child, uniqueConstraints); } break; case HiveParser.TOK_PRIMARY_KEY: { if (!primaryKeys.isEmpty()) { - throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT.getMsg( - "Cannot exist more than one primary key definition for the same table")); + throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT + .getMsg("Cannot exist more than one primary key definition for the same table")); } - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); - processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1], child, primaryKeys); + final TableName tName = getQualifiedTableName((ASTNode) parent.getChild(0)); + processPrimaryKeys(tName, child, primaryKeys); } break; case HiveParser.TOK_FOREIGN_KEY: { - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); - processForeignKeys(qualifiedTabName[0], qualifiedTabName[1], child, foreignKeys); + final TableName tName = getQualifiedTableName((ASTNode) parent.getChild(0)); + processForeignKeys(tName, child, foreignKeys); } break; case HiveParser.TOK_CHECK_CONSTRAINT: { + final TableName tName = + getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. 
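A compact sketch (not part of the patch) of the mapping the constraint helpers above now perform, with one TableName supplying the catalog/db/table parts instead of three loose Strings; the column and constraint names are invented.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;

public class ConstraintSketch {
  public static List<SQLPrimaryKey> primaryKey(TableName tName) {
    List<SQLPrimaryKey> keys = new ArrayList<>();
    // Same argument order as constraintInfosToPrimaryKeys above; db and table now come from tName.
    keys.add(new SQLPrimaryKey(tName.getDb(), tName.getTable(), "id", 1, "pk_id", true, false, true));
    return keys;
  }

  public static SQLUniqueConstraint unique(TableName tName) {
    // Catalog-aware constraints additionally read tName.getCat(), as in constraintInfosToUniqueConstraints.
    return new SQLUniqueConstraint(tName.getCat(), tName.getDb(), tName.getTable(),
        "id", 1, "uk_id", true, false, true);
  }
}
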
Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); - processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], child, null, + processCheckConstraints(tName, child, null, checkConstraints, null, tokenRewriteStream); } break; @@ -1282,39 +1279,35 @@ private static void checkColumnName(String columnName) throws SemanticException constraintChild = (ASTNode) child.getChild(2); } if (constraintChild != null) { - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + final TableName tName = + getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); // Process column constraint switch (constraintChild.getToken().getType()) { case HiveParser.TOK_CHECK_CONSTRAINT: - processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), checkConstraints, typeChild, - tokenRewriteStream); + processCheckConstraints(tName, constraintChild, ImmutableList.of(col.getName()), checkConstraints, + typeChild, tokenRewriteStream); break; case HiveParser.TOK_DEFAULT_VALUE: - processDefaultConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), defaultConstraints, typeChild, tokenRewriteStream); + processDefaultConstraints(tName, constraintChild, ImmutableList.of(col.getName()), defaultConstraints, + typeChild, tokenRewriteStream); break; case HiveParser.TOK_NOT_NULL: - processNotNullConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), notNullConstraints); + processNotNullConstraints(tName, constraintChild, ImmutableList.of(col.getName()), notNullConstraints); break; case HiveParser.TOK_UNIQUE: - processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), uniqueConstraints); + processUniqueConstraints(tName, constraintChild, ImmutableList.of(col.getName()), uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: if (!primaryKeys.isEmpty()) { - throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT.getMsg( - "Cannot exist more than one primary key definition for the same table")); + throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT + .getMsg("Cannot exist more than one primary key definition for the same table")); } - processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), primaryKeys); + processPrimaryKeys(tName, constraintChild, ImmutableList.of(col.getName()), primaryKeys); break; case HiveParser.TOK_FOREIGN_KEY: - processForeignKeys(qualifiedTabName[0], qualifiedTabName[1], constraintChild, + processForeignKeys(tName, constraintChild, foreignKeys); break; default: @@ -1426,7 +1419,7 @@ private static String getUnionTypeStringFromAST(ASTNode typeNode) * */ public static class TableSpec { - public String tableName; + private TableName tableName; public Table tableHandle; public Map partSpec; // has to use LinkedHashMap to enforce order public Partition partHandle; @@ -1442,19 +1435,21 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast) public TableSpec(Table table) { tableHandle = table; - 
tableName = table.getDbName() + "." + table.getTableName(); + tableName = TableName.fromString(table.getTableName(), SessionState.get().getCurrentCatalog(), table.getDbName()); specType = SpecType.TABLE_ONLY; } - public TableSpec(Hive db, String tableName, Map partSpec) + public TableSpec(Hive db, TableName tableName, Map partSpec) throws HiveException { - this(db, tableName, partSpec, false); + this(db, tableName.getNotEmptyDbTable(), partSpec, false); } + public TableSpec(Hive db, String tableName, Map partSpec, boolean allowPartialPartitionsSpec) throws HiveException { Table table = db.getTable(tableName); tableHandle = table; - this.tableName = table.getDbName() + "." + table.getTableName(); + this.tableName = TableName.fromString(table.getTableName(), SessionState.get().getCurrentCatalog(), + table.getDbName()); if (partSpec == null) { specType = SpecType.TABLE_ONLY; } else if(allowPartialPartitionsSpec) { @@ -1474,7 +1469,8 @@ public TableSpec(Hive db, String tableName, Map partSpec, boolea public TableSpec(Table tableHandle, List partitions) throws HiveException { this.tableHandle = tableHandle; - this.tableName = tableHandle.getTableName(); + this.tableName = + TableName.fromString(tableHandle.getTableName(), tableHandle.getCatalogName(), tableHandle.getDbName()); if (partitions != null && !partitions.isEmpty()) { this.specType = SpecType.STATIC_PARTITION; this.partitions = partitions; @@ -1513,11 +1509,11 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit try { // get table metadata - tableName = getUnescapedName((ASTNode)ast.getChild(0)); + tableName = HiveTableName.withNoDefault(getUnescapedName((ASTNode)ast.getChild(0))); boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); if (testMode) { - tableName = conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX) - + tableName; + tableName = TableName.fromString(String.join("", conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX), + tableName.getTable()), tableName.getCat(), tableName.getDb()); // not that elegant, but hard to refactor } if (ast.getToken().getType() != HiveParser.TOK_CREATETABLE && ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW && @@ -1638,6 +1634,14 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit } } + public TableName getTableName() { + return tableName; + } + + public void setTableName(TableName tableName) { + this.tableName = tableName; + } + public Map getPartSpec() { return this.partSpec; } @@ -2172,12 +2176,12 @@ protected Database getDatabase(String dbName, boolean throwException) throws Sem return database; } - protected Table getTable(String[] qualified) throws SemanticException { - return getTable(qualified[0], qualified[1], true); + protected Table getTable(TableName tn) throws SemanticException { + return getTable(tn, true); } - protected Table getTable(String[] qualified, boolean throwException) throws SemanticException { - return getTable(qualified[0], qualified[1], throwException); + protected Table getTable(TableName tn, boolean throwException) throws SemanticException { + return getTable(tn.getDb(), tn.getTable(), throwException); } protected Table getTable(String tblName) throws SemanticException { @@ -2196,13 +2200,14 @@ protected Table getTable(String database, String tblName, boolean throwException : db.getTable(database, tblName, false); } catch (InvalidTableException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName), e); + throw new 
SemanticException(ErrorMsg.INVALID_TABLE.getMsg(TableName.fromString(tblName, null, database).getNotEmptyDbTable()), e); } catch (Exception e) { throw new SemanticException(e.getMessage(), e); } if (tab == null && throwException) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); + // getTable needs a refactor with all ~50 occurences + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(TableName.fromString(tblName, null, database).getNotEmptyDbTable())); } return tab; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 3bee20326a..4a63eb6519 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; @@ -261,11 +262,10 @@ public void analyzeInternal(ASTNode input) throws SemanticException { switch (ast.getType()) { case HiveParser.TOK_ALTERTABLE: { ast = (ASTNode) input.getChild(1); - String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0)); + final TableName tName = + getQualifiedTableName((ASTNode) input.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); - String tableName = getDotName(qualified); HashMap partSpec = null; ASTNode partSpecNode = (ASTNode)input.getChild(2); if (partSpecNode != null) { @@ -275,78 +275,78 @@ public void analyzeInternal(ASTNode input) throws SemanticException { if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { partSpec = getPartSpec(partSpecNode); } else { - partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false); + partSpec = getValidatedPartSpec(getTable(tName), partSpecNode, conf, false); } } if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAME) { - analyzeAlterTableRename(qualified, ast, false); + analyzeAlterTableRename(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_TOUCH) { - analyzeAlterTableTouch(qualified, ast); + analyzeAlterTableTouch(tName, ast); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ARCHIVE) { - analyzeAlterTableArchive(qualified, ast, false); + analyzeAlterTableArchive(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) { - analyzeAlterTableArchive(qualified, ast, true); + analyzeAlterTableArchive(tName, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) { - analyzeAlterTableAddCols(qualified, ast, partSpec); + analyzeAlterTableAddCols(tName, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) { - analyzeAlterTableReplaceCols(qualified, ast, partSpec); + analyzeAlterTableReplaceCols(tName, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) { - analyzeAlterTableRenameCol(catName, qualified, ast, partSpec); + analyzeAlterTableRenameCol(tName, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) { - analyzeAlterTableAddParts(qualified, ast, false); + 
analyzeAlterTableAddParts(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) { - analyzeAlterTableDropParts(qualified, ast, false); + analyzeAlterTableDropParts(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PARTCOLTYPE) { - analyzeAlterTablePartColType(qualified, ast); + analyzeAlterTablePartColType(tName, ast); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, false, false); + analyzeAlterTableProps(tName, null, ast, false, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, false, true); + analyzeAlterTableProps(tName, null, ast, false, true); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UPDATESTATS || ast.getType() == HiveParser.TOK_ALTERPARTITION_UPDATESTATS) { - analyzeAlterTableProps(qualified, partSpec, ast, false, false); + analyzeAlterTableProps(tName, partSpec, ast, false, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_SKEWED) { - analyzeAlterTableSkewedby(qualified, ast); + analyzeAlterTableSkewedby(tName, ast); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION) { - analyzeExchangePartition(qualified, ast); + analyzeExchangePartition(tName, ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_FILEFORMAT) { - analyzeAlterTableFileFormat(ast, tableName, partSpec); + analyzeAlterTableFileFormat(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_LOCATION) { - analyzeAlterTableLocation(ast, tableName, partSpec); + analyzeAlterTableLocation(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_MERGEFILES) { - analyzeAlterTablePartMergeFiles(ast, tableName, partSpec); + analyzeAlterTablePartMergeFiles(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_SERIALIZER) { - analyzeAlterTableSerde(ast, tableName, partSpec); + analyzeAlterTableSerde(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_SERDEPROPERTIES) { - analyzeAlterTableSerdeProps(ast, tableName, partSpec); + analyzeAlterTableSerdeProps(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { - analyzeAlterTableRenamePart(ast, tableName, partSpec); + analyzeAlterTableRenamePart(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION) { - analyzeAlterTableSkewedLocation(ast, tableName, partSpec); + analyzeAlterTableSkewedLocation(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_BUCKETS || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_BUCKETS) { - analyzeAlterTableBucketNum(ast, tableName, partSpec); + analyzeAlterTableBucketNum(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { - analyzeAlterTableClusterSort(ast, tableName, partSpec); + analyzeAlterTableClusterSort(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_COMPACT) { - 
analyzeAlterTableCompact(ast, tableName, partSpec); + analyzeAlterTableCompact(ast, tName, partSpec); } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS || ast.getToken().getType() == HiveParser.TOK_ALTERPARTITION_UPDATECOLSTATS){ - analyzeAlterTableUpdateStats(ast, tableName, partSpec); - } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT) { - analyzeAlterTableDropConstraint(ast, tableName); - } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_ADDCONSTRAINT) { - analyzeAlterTableAddConstraint(ast, tableName); - } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLUMNS) { - analyzeAlterTableUpdateColumns(ast, tableName, partSpec); + analyzeAlterTableUpdateStats(ast, tName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT) { + analyzeAlterTableDropConstraint(ast, tName); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_ADDCONSTRAINT) { + analyzeAlterTableAddConstraint(ast, tName); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLUMNS) { + analyzeAlterTableUpdateColumns(ast, tName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_OWNER) { - analyzeAlterTableOwner(ast, tableName); + analyzeAlterTableOwner(ast, tName); } break; } @@ -401,18 +401,18 @@ public void analyzeInternal(ASTNode input) throws SemanticException { analyzeMetastoreCheck(ast); break; case HiveParser.TOK_ALTERVIEW: { - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + final TableName tName = getQualifiedTableName((ASTNode) ast.getChild(0)); ast = (ASTNode) ast.getChild(1); if (ast.getType() == HiveParser.TOK_ALTERVIEW_PROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, true, false); + analyzeAlterTableProps(tName, null, ast, true, false); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, true, true); + analyzeAlterTableProps(tName, null, ast, true, true); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_ADDPARTS) { - analyzeAlterTableAddParts(qualified, ast, true); + analyzeAlterTableAddParts(tName, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPARTS) { - analyzeAlterTableDropParts(qualified, ast, true); + analyzeAlterTableDropParts(tName, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_RENAME) { - analyzeAlterTableRename(qualified, ast, true); + analyzeAlterTableRename(tName, ast, true); } break; } @@ -459,7 +459,7 @@ private void analyzeCacheMetadata(ASTNode ast) throws SemanticException { rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); } - private void analyzeAlterTableUpdateStats(ASTNode ast, String tblName, Map partSpec) + private void analyzeAlterTableUpdateStats(ASTNode ast, TableName tblName, Map partSpec) throws SemanticException { String colName = getUnescapedName((ASTNode) ast.getChild(0)); Map mapProp = getProps((ASTNode) (ast.getChild(1)).getChild(0)); @@ -500,8 +500,8 @@ private void analyzeAlterTableUpdateStats(ASTNode ast, String tblName, Map partSpec = getPartSpec((ASTNode) root.getChild(1)); @@ -607,9 +608,9 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { // Is this a truncate column command ASTNode colNamesNode = (ASTNode) ast.getFirstChildWithType(HiveParser.TOK_TABCOLNAME); if (colNamesNode == null) { - truncateTask = getTruncateTaskWithoutColumnNames(tableName, partSpec, table); + truncateTask = 
getTruncateTaskWithoutColumnNames(tName, partSpec, table); } else { - truncateTask = getTruncateTaskWithColumnNames(root, tableName, table, partSpec, colNamesNode); + truncateTask = getTruncateTaskWithColumnNames(root, tName, table, partSpec, colNamesNode); } rootTasks.add(truncateTask); @@ -657,7 +658,7 @@ private void addTruncateTableOutputs(ASTNode root, Table table, Map getTruncateTaskWithoutColumnNames(String tableName, Map partSpec, Table table) { + private Task getTruncateTaskWithoutColumnNames(TableName tableName, Map partSpec, Table table) { TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec, null, table); if (truncateTblDesc.mayNeedWriteId()) { setAcidDdlDesc(truncateTblDesc); @@ -667,7 +668,7 @@ private void addTruncateTableOutputs(ASTNode root, Table table, Map getTruncateTaskWithColumnNames(ASTNode root, String tableName, Table table, + private Task getTruncateTaskWithColumnNames(ASTNode root, TableName tName, Table table, Map partSpec, ASTNode colNamesNode) throws SemanticException { try { List columnNames = getColumnNames(colNamesNode); @@ -762,7 +763,7 @@ private void addTruncateTableOutputs(ASTNode root, Table table, Map(columnIndexes), oldTblPartLoc, queryTmpdir, lbCtx); if (truncateTblDesc.mayNeedWriteId()) { setAcidDdlDesc(truncateTblDesc); @@ -771,7 +772,7 @@ private void addTruncateTableOutputs(ASTNode root, Table table, Map truncateTask = TaskFactory.get(ddlWork); - addInputsOutputsAlterTable(tableName, partSpec, null, AlterTableType.TRUNCATE, false); + addInputsOutputsAlterTable(tName, partSpec, null, AlterTableType.TRUNCATE, false); ddlWork.setNeedLock(true); TableDesc tblDesc = Utilities.getTableDesc(table); // Write the output to temporary directory and move it to the final location at the end @@ -871,10 +872,9 @@ private boolean hasConstraintsEnabled(final String tblName) throws SemanticExcep return false; } - private void analyzeAlterTableProps(String[] qualified, HashMap partSpec, - ASTNode ast, boolean expectView, boolean isUnset) throws SemanticException { + private void analyzeAlterTableProps(TableName tableName, Map partSpec, ASTNode ast, + boolean expectView, boolean isUnset) throws SemanticException { - String tableName = getDotName(qualified); Map mapProp = getProps((ASTNode) (ast.getChild(0)).getChild(0)); EnvironmentContext environmentContext = null; // we need to check if the properties are valid, especially for stats. @@ -897,10 +897,10 @@ private void analyzeAlterTableProps(String[] qualified, HashMap } // if table is being modified to be external we need to make sure existing table // doesn't have enabled constraint since constraints are disallowed with such tables - else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ - if(hasConstraintsEnabled(qualified[1])){ + else if (entry.getKey().equals("external") && entry.getValue().equals("true")) { + if (hasConstraintsEnabled(tableName.getTable())) { throw new SemanticException( - ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Table: " + tableName + " has constraints enabled." + ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Table: " + tableName.getDbTable() + " has constraints enabled." 
+ "Please remove those constraints to change this property.")); } } @@ -922,7 +922,7 @@ else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ } boolean isToTxn = AcidUtils.isTablePropertyTransactional(mapProp) || mapProp.containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES); - boolean isExplicitStatsUpdate = changeStatsSucceeded && AcidUtils.isTransactionalTable(getTable(qualified, true)); + boolean isExplicitStatsUpdate = changeStatsSucceeded && AcidUtils.isTransactionalTable(getTable(tableName, true)); AbstractAlterTableDesc alterTblDesc = null; DDLWork ddlWork = null; @@ -948,7 +948,7 @@ else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ } else { addPropertyReadEntry(mapProp, inputs); boolean isAcidConversion = isToTxn && AcidUtils.isFullAcidTable(mapProp) - && !AcidUtils.isFullAcidTable(getTable(qualified, true)); + && !AcidUtils.isFullAcidTable(getTable(tableName, true)); alterTblDesc = new AlterTableSetPropertiesDesc(tableName, partSpec, null, expectView, mapProp, isExplicitStatsUpdate, isAcidConversion, environmentContext); addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, alterTblDesc.getType(), isToTxn); @@ -976,7 +976,7 @@ public DDLDescWithWriteId getAcidDdlDesc() { return ddlDescWithWriteId; } - private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName, Map partSpec) + private void analyzeAlterTableSerdeProps(ASTNode ast, TableName tableName, Map partSpec) throws SemanticException { Map mapProp = getProps((ASTNode) (ast.getChild(0)).getChild(0)); AlterTableSetSerdePropsDesc alterTblDesc = new AlterTableSetSerdePropsDesc(tableName, partSpec, mapProp); @@ -985,7 +985,7 @@ private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName, Map partSpec) + private void analyzeAlterTableSerde(ASTNode ast, TableName tableName, Map partSpec) throws SemanticException { String serdeName = unescapeSQLString(ast.getChild(0).getText()); Map props = (ast.getChildCount() > 1) ? 
getProps((ASTNode) (ast.getChild(1)).getChild(0)) : null; @@ -995,7 +995,7 @@ private void analyzeAlterTableSerde(ASTNode ast, String tableName, Map partSpec) + private void analyzeAlterTableFileFormat(ASTNode ast, TableName tableName, Map partSpec) throws SemanticException { StorageFormat format = new StorageFormat(conf); ASTNode child = (ASTNode) ast.getChild(0); @@ -1025,7 +1025,7 @@ private WriteType determineAlterTableWriteType(Table tab, AbstractAlterTableDesc return WriteEntity.determineAlterTableWriteType(op); } - private void addInputsOutputsAlterTable(String tableName, Map partSpec, + private void addInputsOutputsAlterTable(TableName tableName, Map partSpec, AbstractAlterTableDesc desc, AlterTableType op, boolean doForceExclusive) throws SemanticException { boolean isCascade = desc != null && desc.isCascade(); boolean alterPartitions = partSpec != null && !partSpec.isEmpty(); @@ -1088,7 +1088,7 @@ private void addInputsOutputsAlterTable(String tableName, Map pa } } - private void analyzeAlterTableOwner(ASTNode ast, String tableName) throws SemanticException { + private void analyzeAlterTableOwner(ASTNode ast, TableName tableName) throws SemanticException { PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) ast.getChild(0)); if (ownerPrincipal.getType() == null) { @@ -1103,7 +1103,7 @@ private void analyzeAlterTableOwner(ASTNode ast, String tableName) throws Semant rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); } - private void analyzeAlterTableLocation(ASTNode ast, String tableName, Map partSpec) + private void analyzeAlterTableLocation(ASTNode ast, TableName tableName, Map partSpec) throws SemanticException { String newLocation = unescapeSQLString(ast.getChild(0).getText()); @@ -1127,8 +1127,7 @@ private void analyzeAlterTableLocation(ASTNode ast, String tableName, Map partSpec) + private void analyzeAlterTablePartMergeFiles(ASTNode ast, TableName tableName, Map partSpec) throws SemanticException { Path oldTblPartLoc = null; @@ -1136,130 +1135,134 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, Table tblObj = null; ListBucketingCtx lbCtx = null; - try { - tblObj = getTable(tableName); - if(AcidUtils.isTransactionalTable(tblObj)) { - LinkedHashMap newPartSpec = null; - if (partSpec != null) { - newPartSpec = new LinkedHashMap<>(partSpec); - } + tblObj = getTable(tableName); + if(AcidUtils.isTransactionalTable(tblObj)) { + LinkedHashMap newPartSpec = null; + if (partSpec != null) { + newPartSpec = new LinkedHashMap<>(partSpec); + } - boolean isBlocking = !HiveConf.getBoolVar(conf, - ConfVars.TRANSACTIONAL_CONCATENATE_NOBLOCK, false); - AlterTableCompactDesc desc = new AlterTableCompactDesc(tableName, newPartSpec, "MAJOR", isBlocking, null); + boolean isBlocking = !HiveConf.getBoolVar(conf, + ConfVars.TRANSACTIONAL_CONCATENATE_NOBLOCK, false); + AlterTableCompactDesc desc = new AlterTableCompactDesc(tableName, newPartSpec, "MAJOR", isBlocking, null); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); - return; - } + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); + return; + } - List bucketCols = null; - Class inputFormatClass = null; - boolean isArchived = false; - if (tblObj.isPartitioned()) { - if (partSpec == null) { + List bucketCols = null; + Class inputFormatClass = null; + boolean isArchived = false; + if (tblObj.isPartitioned()) { + if (partSpec == null) { + throw new SemanticException("source table " + tableName + + " is 
partitioned but no partition desc found."); + } else { + Partition part = getPartition(tblObj, partSpec, false); + if (part == null) { throw new SemanticException("source table " + tableName - + " is partitioned but no partition desc found."); - } else { - Partition part = getPartition(tblObj, partSpec, false); - if (part == null) { - throw new SemanticException("source table " + tableName - + " is partitioned but partition not found."); - } - bucketCols = part.getBucketCols(); + + " is partitioned but partition not found."); + } + bucketCols = part.getBucketCols(); + try { inputFormatClass = part.getInputFormatClass(); - isArchived = ArchiveUtils.isArchived(part); + } catch (HiveException e) { + throw new SemanticException(e); + } + isArchived = ArchiveUtils.isArchived(part); - Path tabPath = tblObj.getPath(); - Path partPath = part.getDataLocation(); + Path tabPath = tblObj.getPath(); + Path partPath = part.getDataLocation(); - // if the table is in a different dfs than the partition, - // replace the partition's dfs with the table's dfs. - newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri() - .getAuthority(), partPath.toUri().getPath()); + // if the table is in a different dfs than the partition, + // replace the partition's dfs with the table's dfs. + newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri() + .getAuthority(), partPath.toUri().getPath()); - oldTblPartLoc = partPath; + oldTblPartLoc = partPath; - lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), - part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories()); - } - } else { - inputFormatClass = tblObj.getInputFormatClass(); - bucketCols = tblObj.getBucketCols(); + lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), + part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories()); + } + } else { + inputFormatClass = tblObj.getInputFormatClass(); + bucketCols = tblObj.getBucketCols(); - // input and output are the same - oldTblPartLoc = tblObj.getPath(); - newTblPartLoc = tblObj.getPath(); + // input and output are the same + oldTblPartLoc = tblObj.getPath(); + newTblPartLoc = tblObj.getPath(); - lbCtx = constructListBucketingCtx(tblObj.getSkewedColNames(), tblObj.getSkewedColValues(), - tblObj.getSkewedColValueLocationMaps(), tblObj.isStoredAsSubDirectories()); - } + lbCtx = constructListBucketingCtx(tblObj.getSkewedColNames(), tblObj.getSkewedColValues(), + tblObj.getSkewedColValueLocationMaps(), tblObj.isStoredAsSubDirectories()); + } - // throw a HiveException for other than rcfile and orcfile. - if (!(inputFormatClass.equals(RCFileInputFormat.class) || inputFormatClass.equals(OrcInputFormat.class))) { - throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_FILE_FORMAT.getMsg()); - } + // throw a HiveException for other than rcfile and orcfile. 
+ if (!(inputFormatClass.equals(RCFileInputFormat.class) || inputFormatClass.equals(OrcInputFormat.class))) { + throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_FILE_FORMAT.getMsg()); + } - // throw a HiveException if the table/partition is bucketized - if (bucketCols != null && bucketCols.size() > 0) { - throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_BUCKETED.getMsg()); - } + // throw a HiveException if the table/partition is bucketized + if (bucketCols != null && bucketCols.size() > 0) { + throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_BUCKETED.getMsg()); + } - // throw a HiveException if the table/partition is archived - if (isArchived) { - throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_PARTITION_ARCHIVED.getMsg()); - } + // throw a HiveException if the table/partition is archived + if (isArchived) { + throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_PARTITION_ARCHIVED.getMsg()); + } - // non-native and non-managed tables are not supported as MoveTask requires filenames to be in specific format, - // violating which can cause data loss - if (tblObj.isNonNative()) { - throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_NON_NATIVE.getMsg()); - } + // non-native and non-managed tables are not supported as MoveTask requires filenames to be in specific format, + // violating which can cause data loss + if (tblObj.isNonNative()) { + throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_NON_NATIVE.getMsg()); + } - if (tblObj.getTableType() != TableType.MANAGED_TABLE) { - throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_NOT_MANAGED.getMsg()); - } + if (tblObj.getTableType() != TableType.MANAGED_TABLE) { + throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_NOT_MANAGED.getMsg()); + } - addInputsOutputsAlterTable(tableName, partSpec, null, AlterTableType.MERGEFILES, false); - TableDesc tblDesc = Utilities.getTableDesc(tblObj); - Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); - AlterTableConcatenateDesc mergeDesc = new AlterTableConcatenateDesc(tableName, partSpec, lbCtx, oldTblPartLoc, - queryTmpdir, inputFormatClass, Utilities.getTableDesc(tblObj)); - DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc); - ddlWork.setNeedLock(true); - Task mergeTask = TaskFactory.get(ddlWork); - // No need to handle MM tables - unsupported path. - LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap<>() : partSpec); - ltd.setLbCtx(lbCtx); - ltd.setInheritTableSpecs(true); - Task moveTsk = - TaskFactory.get(new MoveWork(null, null, ltd, null, false)); - mergeTask.addDependentTask(moveTsk); + addInputsOutputsAlterTable(tableName, partSpec, null, AlterTableType.MERGEFILES, false); + TableDesc tblDesc = Utilities.getTableDesc(tblObj); + Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); + AlterTableConcatenateDesc mergeDesc = new AlterTableConcatenateDesc(tableName, partSpec, lbCtx, oldTblPartLoc, + queryTmpdir, inputFormatClass, Utilities.getTableDesc(tblObj)); + DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc); + ddlWork.setNeedLock(true); + Task mergeTask = TaskFactory.get(ddlWork); + // No need to handle MM tables - unsupported path. + LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, + partSpec == null ? 
new HashMap<>() : partSpec); + ltd.setLbCtx(lbCtx); + ltd.setInheritTableSpecs(true); + Task moveTsk = + TaskFactory.get(new MoveWork(null, null, ltd, null, false)); + mergeTask.addDependentTask(moveTsk); - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - BasicStatsWork basicStatsWork; - if (oldTblPartLoc.equals(newTblPartLoc)) { - // If we're merging to the same location, we can avoid some metastore calls + if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + BasicStatsWork basicStatsWork; + if (oldTblPartLoc.equals(newTblPartLoc)) { + // If we're merging to the same location, we can avoid some metastore calls + try{ TableSpec tableSpec = new TableSpec(db, tableName, partSpec); basicStatsWork = new BasicStatsWork(tableSpec); - } else { - basicStatsWork = new BasicStatsWork(ltd); + } catch (HiveException e){ + throw new SemanticException(e); } - basicStatsWork.setNoStatsAggregator(true); - basicStatsWork.setClearAggregatorStats(true); - StatsWork columnStatsWork = new StatsWork(tblObj, basicStatsWork, conf); - - Task statTask = TaskFactory.get(columnStatsWork); - moveTsk.addDependentTask(statTask); + } else { + basicStatsWork = new BasicStatsWork(ltd); } + basicStatsWork.setNoStatsAggregator(true); + basicStatsWork.setClearAggregatorStats(true); + StatsWork columnStatsWork = new StatsWork(tblObj, basicStatsWork, conf); - rootTasks.add(mergeTask); - } catch (Exception e) { - throw new SemanticException(e); + Task statTask = TaskFactory.get(columnStatsWork); + moveTsk.addDependentTask(statTask); } + + rootTasks.add(mergeTask); } - private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, Map partSpec) + private void analyzeAlterTableClusterSort(ASTNode ast, TableName tableName, Map partSpec) throws SemanticException { AbstractAlterTableDesc alterTblDesc; @@ -1294,7 +1297,7 @@ private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, Map partSpec) throws SemanticException { String type = unescapeSQLString(ast.getChild(0).getText()).toLowerCase(); @@ -1319,19 +1322,21 @@ private void analyzeAlterTableCompact(ASTNode ast, String tableName, for(int i = 0; i < ast.getChildCount(); i++) { switch(ast.getChild(i).getType()) { - case HiveParser.TOK_TABLEPROPERTIES: - mapProp = getProps((ASTNode) (ast.getChild(i)).getChild(0)); - break; - case HiveParser.TOK_BLOCKING: - isBlocking = true; - break; + case HiveParser.TOK_TABLEPROPERTIES: + mapProp = getProps((ASTNode) (ast.getChild(i)).getChild(0)); + break; + case HiveParser.TOK_BLOCKING: + isBlocking = true; + break; + default: + break; } } AlterTableCompactDesc desc = new AlterTableCompactDesc(tableName, newPartSpec, type, isBlocking, mapProp); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); } - private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName) + private void analyzeAlterTableDropConstraint(ASTNode ast, TableName tableName) throws SemanticException { String constraintName = unescapeIdentifier(ast.getChild(0).getText()); AlterTableDropConstraintDesc alterTblDesc = new AlterTableDropConstraintDesc(tableName, null, constraintName); @@ -1339,13 +1344,12 @@ private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName) rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) + private void analyzeAlterTableAddConstraint(ASTNode ast, TableName tableName) throws SemanticException { ASTNode parent = (ASTNode) ast.getParent(); - 
String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + final TableName tName = getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); ASTNode child = (ASTNode) ast.getChild(0); List primaryKeys = new ArrayList<>(); List foreignKeys = new ArrayList<>(); @@ -1354,21 +1358,17 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) switch (child.getToken().getType()) { case HiveParser.TOK_UNIQUE: - BaseSemanticAnalyzer.processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], - child, uniqueConstraints); + BaseSemanticAnalyzer.processUniqueConstraints(tName, child, uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: - BaseSemanticAnalyzer.processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1], - child, primaryKeys); + BaseSemanticAnalyzer.processPrimaryKeys(tName, child, primaryKeys); break; case HiveParser.TOK_FOREIGN_KEY: - BaseSemanticAnalyzer.processForeignKeys(qualifiedTabName[0], qualifiedTabName[1], - child, foreignKeys); + BaseSemanticAnalyzer.processForeignKeys(tName, child, foreignKeys); break; case HiveParser.TOK_CHECK_CONSTRAINT: - BaseSemanticAnalyzer.processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], - child, null, checkConstraints, child, - this.ctx.getTokenRewriteStream()); + BaseSemanticAnalyzer + .processCheckConstraints(tName, child, null, checkConstraints, child, this.ctx.getTokenRewriteStream()); break; default: throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg( @@ -1382,7 +1382,7 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableUpdateColumns(ASTNode ast, String tableName, + private void analyzeAlterTableUpdateColumns(ASTNode ast, TableName tableName, HashMap partSpec) throws SemanticException { boolean isCascade = false; @@ -1434,12 +1434,8 @@ static public String getFullyQualifiedName(ASTNode ast) { // return column name if exists, column could be DOT separated. // example: lintString.$elem$.myint // return table name for column name if no column has been specified. - static public String getColPath( - Hive db, - ASTNode node, - String dbName, - String tableName, - Map partSpec) throws SemanticException { + static public String getColPath(Hive db, ASTNode node, TableName tableName, Map partSpec) + throws SemanticException { // if this ast has only one child, then no column name specified. if (node.getChildCount() == 1) { @@ -1457,19 +1453,14 @@ static public String getColPath( } if (columnNode != null) { - if (dbName == null) { - return tableName + "." + QualifiedNameUtil.getFullyQualifiedName(columnNode); - } else { - return tableName.substring(dbName.length() + 1, tableName.length()) + "." + - QualifiedNameUtil.getFullyQualifiedName(columnNode); - } + return String.join(".", tableName.getNotEmptyDbTable(), QualifiedNameUtil.getFullyQualifiedName(columnNode)); } else { return null; } } // get partition metadata - static public Map getPartitionSpec(Hive db, ASTNode ast, String tableName) + static Map getPartitionSpec(Hive db, ASTNode ast, TableName tableName) throws SemanticException { ASTNode partNode = null; // if this ast has only one child, then no partition spec specified. 
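The recurring pattern in this hunk and the ones above is the replacement of the ad-hoc String[] qualified pair (plus a separately threaded catName) with a single TableName value, so helpers such as processCheckConstraints and the analyzeAlterTable* methods take one reference instead of two or three strings. Below is a minimal sketch of the new call shape, assuming TableName.fromString(name, catalog, defaultDb) resolves an optionally qualified db.table string against the supplied defaults as the call sites suggest; the literal catalog, database and table names are invented for illustration and are not part of the patch.

    import org.apache.hadoop.hive.common.TableName;

    class TableNameRefactorSketch {
      static void example() {
        // One immutable reference carrying catalog, db and table,
        // instead of qualified[0]/qualified[1] plus a separate catName.
        TableName tName = TableName.fromString("sales.orders", "hive", "default");

        tName.getDb();               // expected: "sales"
        tName.getTable();            // expected: "orders"
        tName.getDbTable();          // expected: "sales.orders", replacing getDotName(qualified)
        tName.getNotEmptyDbTable();  // same idea, presumably guaranteed to carry a db qualifier
      }
    }
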
@@ -1491,10 +1482,10 @@ static public String getColPath( if (partNode != null) { Table tab = null; try { - tab = db.getTable(tableName); + tab = db.getTable(tableName.getNotEmptyDbTable()); } catch (InvalidTableException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e); + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName.getNotEmptyDbTable()), e); } catch (HiveException e) { throw new SemanticException(e.getMessage(), e); @@ -1551,7 +1542,7 @@ private void validateDatabase(String databaseName) throws SemanticException { } } - private void validateTable(String tableName, Map partSpec) + private void validateTable(TableName tableName, Map partSpec) throws SemanticException { Table tab = getTable(tableName); if (partSpec != null) { @@ -1576,8 +1567,7 @@ private void validateTable(String tableName, Map partSpec) private void analyzeDescribeTable(ASTNode ast) throws SemanticException { ASTNode tableTypeExpr = (ASTNode) ast.getChild(0); - String dbName = null; - String tableName = null; + final TableName tableName; String colPath = null; Map partSpec = null; @@ -1588,10 +1578,10 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { if (((ASTNode) tableTypeExpr.getChild(0)).getType() == HiveParser.TOK_TABNAME) { tableNode = (ASTNode) tableTypeExpr.getChild(0); if (tableNode.getChildCount() == 1) { - tableName = ((ASTNode) tableNode.getChild(0)).getText(); + tableName = HiveTableName.of(((ASTNode) tableNode.getChild(0)).getText()); } else { - dbName = ((ASTNode) tableNode.getChild(0)).getText(); - tableName = dbName + "." + ((ASTNode) tableNode.getChild(1)).getText(); + tableName = TableName.fromString(((ASTNode) tableNode.getChild(1)).getText(), + SessionState.get().getCurrentCatalog(), ((ASTNode) tableNode.getChild(0)).getText()); } } else { throw new SemanticException(((ASTNode) tableTypeExpr.getChild(0)).getText() + " is not an expected token type"); @@ -1601,12 +1591,12 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { partSpec = QualifiedNameUtil.getPartitionSpec(db, tableTypeExpr, tableName); // process the third child node,if exists, to get partition spec(s) - colPath = QualifiedNameUtil.getColPath(db, tableTypeExpr, dbName, tableName, partSpec); + colPath = QualifiedNameUtil.getColPath(db, tableTypeExpr, tableName, partSpec); // if database is not the one currently using // validate database - if (dbName != null) { - validateDatabase(dbName); + if (tableName.getDb() != null) { + validateDatabase(tableName.getDb()); } if (partSpec != null) { validateTable(tableName, partSpec); @@ -1676,7 +1666,7 @@ private void analyzeShowPartitions(ASTNode ast) throws SemanticException { partSpec = partSpecs.get(0); } - validateTable(tableName, null); + validateTable(HiveTableName.ofNullableWithNoDefault(tableName), null); showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec); inputs.add(new ReadEntity(getTable(tableName))); @@ -1798,7 +1788,7 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { } if (partSpec != null) { - validateTable(tableNames, partSpec); + validateTable(HiveTableName.ofNullableWithNoDefault(tableNames), partSpec); } showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile().toString(), dbName, tableNames, partSpec); @@ -1808,16 +1798,15 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { ShowTablePropertiesDesc 
showTblPropertiesDesc; - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + TableName qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String propertyName = null; if (ast.getChildCount() > 1) { propertyName = unescapeSQLString(ast.getChild(1).getText()); } - String tableNames = getDotName(qualified); - validateTable(tableNames, null); + validateTable(qualified, null); - showTblPropertiesDesc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableNames, propertyName); + showTblPropertiesDesc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), qualified, propertyName); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblPropertiesDesc))); setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA)); } @@ -2050,24 +2039,21 @@ private void analyzeUnlockTable(ASTNode ast) ctx.setNeedLockMgr(true); } - private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expectView) + private void analyzeAlterTableRename(TableName source, ASTNode ast, boolean expectView) throws SemanticException { - String[] target = getQualifiedTableName((ASTNode) ast.getChild(0)); + final TableName target = getQualifiedTableName((ASTNode) ast.getChild(0)); - String sourceName = getDotName(source); - String targetName = getDotName(target); - - AlterTableRenameDesc alterTblDesc = new AlterTableRenameDesc(sourceName, null, expectView, targetName); - Table table = getTable(sourceName, true); + AlterTableRenameDesc alterTblDesc = new AlterTableRenameDesc(source, null, expectView, target.getDbTable()); + Table table = getTable(source.getDbTable(), true); if (AcidUtils.isTransactionalTable(table)) { setAcidDdlDesc(alterTblDesc); } - addInputsOutputsAlterTable(sourceName, null, alterTblDesc, alterTblDesc.getType(), false); + addInputsOutputsAlterTable(source, null, alterTblDesc, alterTblDesc.getType(), false); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTNode ast, - HashMap partSpec) throws SemanticException { + private void analyzeAlterTableRenameCol(TableName tName, ASTNode ast, Map partSpec) + throws SemanticException { String newComment = null; boolean first = false; String flagCol = null; @@ -2110,35 +2096,29 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN switch (constraintChild.getToken().getType()) { case HiveParser.TOK_CHECK_CONSTRAINT: checkConstraints = new ArrayList<>(); - processCheckConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), checkConstraints, (ASTNode)ast.getChild(2), - this.ctx.getTokenRewriteStream()); + processCheckConstraints(tName, constraintChild, ImmutableList.of(newColName), checkConstraints, + (ASTNode) ast.getChild(2), this.ctx.getTokenRewriteStream()); break; case HiveParser.TOK_DEFAULT_VALUE: defaultConstraints = new ArrayList<>(); - processDefaultConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), defaultConstraints, (ASTNode)ast.getChild(2), - this.ctx.getTokenRewriteStream()); + processDefaultConstraints(tName, constraintChild, ImmutableList.of(newColName), defaultConstraints, + (ASTNode) ast.getChild(2), this.ctx.getTokenRewriteStream()); break; case HiveParser.TOK_NOT_NULL: notNullConstraints = new ArrayList<>(); - processNotNullConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), notNullConstraints); + 
processNotNullConstraints(tName, constraintChild, ImmutableList.of(newColName), notNullConstraints); break; case HiveParser.TOK_UNIQUE: uniqueConstraints = new ArrayList<>(); - processUniqueConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), uniqueConstraints); + processUniqueConstraints(tName, constraintChild, ImmutableList.of(newColName), uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: primaryKeys = new ArrayList<>(); - processPrimaryKeys(qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), primaryKeys); + processPrimaryKeys(tName, constraintChild, ImmutableList.of(newColName), primaryKeys); break; case HiveParser.TOK_FOREIGN_KEY: foreignKeys = new ArrayList<>(); - processForeignKeys(qualified[0], qualified[1], constraintChild, - foreignKeys); + processForeignKeys(tName, constraintChild, foreignKeys); break; default: throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg( @@ -2147,7 +2127,7 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN } /* Validate the operation of renaming a column name. */ - Table tab = getTable(qualified); + Table tab = getTable(tName); if(checkConstraints != null && !checkConstraints.isEmpty()) { validateCheckConstraint(tab.getCols(), checkConstraints, ctx.getConf()); @@ -2168,12 +2148,11 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg()); } - String tblName = getDotName(qualified); Constraints constraints = new Constraints(primaryKeys, foreignKeys, notNullConstraints, uniqueConstraints, defaultConstraints, checkConstraints); - AlterTableChangeColumnDesc alterTblDesc = new AlterTableChangeColumnDesc(tblName, partSpec, isCascade, constraints, + AlterTableChangeColumnDesc alterTblDesc = new AlterTableChangeColumnDesc(tName, partSpec, isCascade, constraints, unescapeIdentifier(oldColName), unescapeIdentifier(newColName), newType, newComment, first, flagCol); - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, alterTblDesc.getType(), false); + addInputsOutputsAlterTable(tName, partSpec, alterTblDesc, alterTblDesc.getType(), false); if (AcidUtils.isTransactionalTable(tab)) { // Note: we might actually need it only when certain changes (e.g. name or type?) are made. 
setAcidDdlDesc(alterTblDesc); @@ -2183,7 +2162,7 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, + private void analyzeAlterTableRenamePart(ASTNode ast, TableName tblName, HashMap oldPartSpec) throws SemanticException { Table tab = getTable(tblName, true); validateAlterTableType(tab, AlterTableType.RENAMEPARTITION); @@ -2208,7 +2187,7 @@ private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), renamePartitionDesc))); } - private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, Map partSpec) + private void analyzeAlterTableBucketNum(ASTNode ast, TableName tblName, Map partSpec) throws SemanticException { Table tab = getTable(tblName, true); if (CollectionUtils.isEmpty(tab.getBucketCols())) { @@ -2223,48 +2202,45 @@ private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, Map partSpec) + private void analyzeAlterTableAddCols(TableName tName, ASTNode ast, Map partSpec) throws SemanticException { - String tblName = getDotName(qualified); List newCols = getColumns((ASTNode) ast.getChild(0)); boolean isCascade = false; if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) { isCascade = true; } - AlterTableAddColumnsDesc desc = new AlterTableAddColumnsDesc(tblName, partSpec, isCascade, newCols); - Table table = getTable(tblName, true); + AlterTableAddColumnsDesc desc = new AlterTableAddColumnsDesc(tName, partSpec, isCascade, newCols); + Table table = getTable(tName, true); if (AcidUtils.isTransactionalTable(table)) { setAcidDdlDesc(desc); } - addInputsOutputsAlterTable(tblName, partSpec, desc, desc.getType(), false); + addInputsOutputsAlterTable(tName, partSpec, desc, desc.getType(), false); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); } - private void analyzeAlterTableReplaceCols(String[] qualified, ASTNode ast, Map partSpec) + private void analyzeAlterTableReplaceCols(TableName tName, ASTNode ast, Map partSpec) throws SemanticException { - String tblName = getDotName(qualified); List newCols = getColumns((ASTNode) ast.getChild(0)); boolean isCascade = false; if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) { isCascade = true; } - AlterTableReplaceColumnsDesc alterTblDesc = new AlterTableReplaceColumnsDesc(tblName, partSpec, isCascade, newCols); - Table table = getTable(tblName, true); + AlterTableReplaceColumnsDesc alterTblDesc = new AlterTableReplaceColumnsDesc(tName, partSpec, isCascade, newCols); + Table table = getTable(tName, true); if (AcidUtils.isTransactionalTable(table)) { setAcidDdlDesc(alterTblDesc); } - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, alterTblDesc.getType(), false); + addInputsOutputsAlterTable(tName, partSpec, alterTblDesc, alterTblDesc.getType(), false); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean expectView) - throws SemanticException { + private void analyzeAlterTableDropParts(TableName tName, ASTNode ast, boolean expectView) throws SemanticException { boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null) || HiveConf.getBoolVar(conf, ConfVars.DROP_IGNORES_NON_EXISTENT); @@ -2281,7 +2257,7 @@ private void analyzeAlterTableDropParts(String[] qualified, 
ASTNode ast, boolean Table tab = null; try { - tab = getTable(qualified); + tab = getTable(tName); } catch (SemanticException se){ if (replicationSpec.isInReplicationScope() && ( @@ -2315,16 +2291,16 @@ private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists); AlterTableDropPartitionDesc dropTblDesc = - new AlterTableDropPartitionDesc(getDotName(qualified), partSpecs, mustPurge, replicationSpec); + new AlterTableDropPartitionDesc(tName, partSpecs, mustPurge, replicationSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc))); } - private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) + private void analyzeAlterTablePartColType(TableName tableName, ASTNode ast) throws SemanticException { // check if table exists. - Table tab = getTable(qualified); + Table tab = getTable(tableName); inputs.add(new ReadEntity(tab)); // validate the DDL is a valid operation on the table. @@ -2362,7 +2338,7 @@ private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) } AlterTableAlterPartitionDesc alterTblAlterPartDesc = - new AlterTableAlterPartitionDesc(getDotName(qualified), newCol); + new AlterTableAlterPartitionDesc(tableName.getDbTable(), newCol); if (AcidUtils.isTransactionalTable(tab)) { setAcidDdlDesc(alterTblAlterPartDesc); } @@ -2370,7 +2346,7 @@ private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblAlterPartDesc))); } - /** + /** * Add one or more partitions to a table. Useful when the data has been copied * to the right location by some other process. * @@ -2383,13 +2359,12 @@ private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) * @throws SemanticException * Parsing failed */ - private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boolean expectView) - throws SemanticException { + private void analyzeAlterTableAddParts(TableName tName, CommonTree ast, boolean expectView) throws SemanticException { // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? 
alterStatementSuffixAddPartitionsElement+) boolean ifNotExists = ast.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS; - Table table = getTable(qualified); + Table table = getTable(tName); boolean isView = table.isView(); validateAlterTableType(table, AlterTableType.ADDPARTITION, expectView); outputs.add(new WriteEntity(table, @@ -2454,9 +2429,9 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole // Compile internal query to capture underlying table partition dependencies StringBuilder cmd = new StringBuilder(); cmd.append("SELECT * FROM "); - cmd.append(HiveUtils.unparseIdentifier(qualified[0])); + cmd.append(HiveUtils.unparseIdentifier(tName.getDb())); cmd.append("."); - cmd.append(HiveUtils.unparseIdentifier(qualified[1])); + cmd.append(HiveUtils.unparseIdentifier(tName.getTable())); cmd.append(" WHERE "); boolean firstOr = true; for (AlterTableAddPartitionDesc.PartitionDesc partitionDesc : partitions) { @@ -2568,10 +2543,9 @@ private void handleTransactionalTable(Table tab, AlterTableAddPartitionDesc addP * @throws SemanticException * Parsing failed */ - private void analyzeAlterTableTouch(String[] qualified, CommonTree ast) - throws SemanticException { + private void analyzeAlterTableTouch(TableName tName, CommonTree ast) throws SemanticException { - Table tab = getTable(qualified); + Table tab = getTable(tName); validateAlterTableType(tab, AlterTableType.TOUCH); inputs.add(new ReadEntity(tab)); @@ -2579,26 +2553,25 @@ private void analyzeAlterTableTouch(String[] qualified, CommonTree ast) List> partSpecs = getPartitionSpecs(tab, ast); if (partSpecs.isEmpty()) { - AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(getDotName(qualified), null); + AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(tName.getDbTable(), null); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc))); } else { addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK); for (Map partSpec : partSpecs) { - AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(getDotName(qualified), partSpec); + AlterTableTouchDesc touchDesc = new AlterTableTouchDesc(tName.getDbTable(), partSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc))); } } } - private void analyzeAlterTableArchive(String[] qualified, CommonTree ast, boolean isUnArchive) - throws SemanticException { + private void analyzeAlterTableArchive(TableName tName, CommonTree ast, boolean isUnArchive) throws SemanticException { if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); } - Table tab = getTable(qualified); + Table tab = getTable(tName); // partition name to value List> partSpecs = getPartitionSpecs(tab, ast); @@ -2623,9 +2596,9 @@ private void analyzeAlterTableArchive(String[] qualified, CommonTree ast, boolea } DDLDesc archiveDesc = null; if (isUnArchive) { - archiveDesc = new AlterTableUnarchiveDesc(getDotName(qualified), partSpec); + archiveDesc = new AlterTableUnarchiveDesc(tName.getDbTable(), partSpec); } else { - archiveDesc = new AlterTableArchiveDesc(getDotName(qualified), partSpec); + archiveDesc = new AlterTableArchiveDesc(tName.getDbTable(), partSpec); } rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), archiveDesc))); } @@ -2994,32 +2967,31 @@ private void addTableDropPartsOutputs(Table tab, * node * @throws SemanticException */ - private void 
analyzeAlterTableSkewedby(String[] qualified, ASTNode ast) throws SemanticException { + private void analyzeAlterTableSkewedby(TableName tName, ASTNode ast) throws SemanticException { /** * Throw an error if the user tries to use the DDL with * hive.internal.ddl.list.bucketing.enable set to false. */ SessionState.get().getConf(); - Table tab = getTable(qualified); + Table tab = getTable(tName); inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); validateAlterTableType(tab, AlterTableType.SKEWED_BY); - String tableName = getDotName(qualified); if (ast.getChildCount() == 0) { /* Convert a skewed table to non-skewed table. */ - AlterTableNotSkewedDesc alterTblDesc = new AlterTableNotSkewedDesc(tableName); + AlterTableNotSkewedDesc alterTblDesc = new AlterTableNotSkewedDesc(tName); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } else { switch (((ASTNode) ast.getChild(0)).getToken().getType()) { case HiveParser.TOK_TABLESKEWED: - handleAlterTableSkewedBy(ast, tableName, tab); + handleAlterTableSkewedBy(ast, tName, tab); break; case HiveParser.TOK_STOREDASDIRS: - handleAlterTableDisableStoredAsDirs(tableName, tab); + handleAlterTableDisableStoredAsDirs(tName, tab); break; default: assert false; @@ -3034,12 +3006,12 @@ private void analyzeAlterTableSkewedby(String[] qualified, ASTNode ast) throws S * @param tab * @throws SemanticException */ - private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) + private void handleAlterTableDisableStoredAsDirs(TableName tableName, Table tab) throws SemanticException { List skewedColNames = tab.getSkewedColNames(); List> skewedColValues = tab.getSkewedColValues(); if (CollectionUtils.isEmpty(skewedColNames) || CollectionUtils.isEmpty(skewedColValues)) { - throw new SemanticException(ErrorMsg.ALTER_TBL_STOREDASDIR_NOT_SKEWED.getMsg(tableName)); + throw new SemanticException(ErrorMsg.ALTER_TBL_STOREDASDIR_NOT_SKEWED.getMsg(tableName.getNotEmptyDbTable())); } AlterTableSkewedByDesc alterTblDesc = new AlterTableSkewedByDesc(tableName, skewedColNames, skewedColValues, false); @@ -3053,7 +3025,7 @@ private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) * @param tab * @throws SemanticException */ - private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) throws SemanticException { + private void handleAlterTableSkewedBy(ASTNode ast, TableName tableName, Table tab) throws SemanticException { List skewedColNames = new ArrayList(); List> skewedValues = new ArrayList>(); /* skewed column names. */ @@ -3083,7 +3055,7 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) * @param partSpec * @throws SemanticException */ - private void analyzeAlterTableSkewedLocation(ASTNode ast, String tableName, + private void analyzeAlterTableSkewedLocation(ASTNode ast, TableName tableName, HashMap partSpec) throws SemanticException { /** * Throw an error if the user tries to use the DDL with diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java index 4a366a9360..83c0d2bf43 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java @@ -115,8 +115,8 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { MmContext mmCtx = MmContext.createIfNeeded(ts == null ? 
null : ts.tableHandle); Utilities.FILE_OP_LOGGER.debug("Exporting table {}: MM context {}", - ts == null ? null : ts.tableName, mmCtx); - // Configure export work + ts == null ? null : ts.getTableName(), mmCtx); + // Configure export work ExportWork exportWork = new ExportWork(exportRootDirName, ts, replicationSpec, ErrorMsg.INVALID_PATH.getMsg(ast), acidTableName, mmCtx); // Create an export task and add it as a root task diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java new file mode 100644 index 0000000000..cd9f88c53b --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse; + +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.session.SessionState; + +/** + * A utility class for {@link TableName}. + */ +public final class HiveTableName extends TableName { + + public HiveTableName(String catName, String dbName, String tableName) { + super(catName, dbName, tableName); + } + + /** + * Get a {@link TableName} object based on a {@link Table}. This is basically a wrapper of + * {@link TableName#fromString(String, String, String)} to throw a {@link SemanticException} in case of errors. + * @param table the table + * @return a {@link TableName} + * @throws SemanticException + */ + public static TableName of(Table table) throws SemanticException { + return ofNullable(table.getTableName(), table.getDbName()); + } + + /** + * Set a @{@link Table} object's table and db names based on the provided string. + * @param dbTable the dbtable string + * @param table the table to update + * @return the table + * @throws SemanticException + */ + public static Table setFrom(String dbTable, Table table) throws SemanticException{ + TableName name = ofNullable(dbTable); + table.setTableName(name.getTable()); + table.setDbName(name.getDb()); + return table; + } + + /** + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName}. All parts can be null. + * + * @param dbTableName + * @return a {@link TableName} + * @throws SemanticException + * @deprecated use {@link #of(String)} or {@link #fromString(String, String, String)} + */ + // to be @Deprecated + public static TableName ofNullable(String dbTableName) throws SemanticException { + return ofNullable(dbTableName, SessionState.get().getCurrentDatabase()); + } + + /** + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName}. All parts can be null. 
This method won't try to find the default db based on the session state. + * + * @param dbTableName + * @return a {@link TableName} + * @throws SemanticException + * @deprecated use {@link #of(String)} or {@link #fromString(String, String, String)} + */ + // to be @Deprecated + public static TableName ofNullableWithNoDefault(String dbTableName) throws SemanticException { + return ofNullable(dbTableName, null); + } + + /** + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName}. All parts can be null. + * + * @param dbTableName + * @param defaultDb + * @return a {@link TableName} + * @throws SemanticException + * @deprecated use {@link #of(String)} or {@link #fromString(String, String, String)} + */ + // to be @Deprecated + public static TableName ofNullable(String dbTableName, String defaultDb) throws SemanticException { + if (dbTableName == null) { + return new TableName(null, null, null); + } else { + try { + return fromString(dbTableName, SessionState.get().getCurrentCatalog(), defaultDb); + } catch (IllegalArgumentException e) { + throw new SemanticException(e); + } + } + } + + /** + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName}. This method won't try to find the default db/catalog based on the session state. + * + * @param dbTableName not null + * @return a {@link TableName} + * @throws SemanticException if dbTableName is null + * @deprecated use {@link #of(String)} instead and use the default db/catalog. + */ + // to be @Deprecated + public static TableName withNoDefault(String dbTableName) throws SemanticException { + try { + return fromString(dbTableName, null, null); + } catch (IllegalArgumentException e) { + throw new SemanticException(e); + } + } + + /** + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName}. 
+ * + * @param dbTableName not null + * @return a {@link TableName} + * @throws SemanticException if dbTableName is null + */ + public static TableName of(String dbTableName) throws SemanticException { + try { + return fromString(dbTableName, SessionState.get().getCurrentCatalog(), SessionState.get().getCurrentDatabase()); + } catch (IllegalArgumentException e) { + throw new SemanticException(e); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index ec75fa4e9a..85c5360699 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; @@ -327,7 +328,7 @@ public static boolean prepareImport(boolean isImportCmd, } if (StringUtils.isNotBlank(parsedTableName)) { - tblDesc.setTableName(parsedTableName); + tblDesc.setTableName(TableName.fromString(parsedTableName, null, dbname)); } if (tblDesc.getTableName() == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java index 33247f0745..31068cb8c3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; @@ -177,7 +178,7 @@ protected Table getTargetTable(ASTNode tabRef) throws SemanticException { * @param throwException if false, return null if table doesn't exist, else throw */ protected static Table getTable(ASTNode tabRef, Hive db, boolean throwException) throws SemanticException { - String[] tableName; + TableName tableName; switch (tabRef.getType()) { case HiveParser.TOK_TABREF: tableName = getQualifiedTableName((ASTNode) tabRef.getChild(0)); @@ -191,12 +192,12 @@ protected static Table getTable(ASTNode tabRef, Hive db, boolean throwException) Table mTable; try { - mTable = db.getTable(tableName[0], tableName[1], throwException); + mTable = db.getTable(tableName.getDb(), tableName.getTable(), throwException); } catch (InvalidTableException e) { - LOG.error("Failed to find table " + getDotName(tableName) + " got exception " + e.getMessage()); - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(getDotName(tableName)), e); + LOG.error("Failed to find table " + tableName.getNotEmptyDbTable() + " got exception " + e.getMessage()); + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName.getNotEmptyDbTable()), e); } catch (HiveException e) { - LOG.error("Failed to find table " + getDotName(tableName) + " got exception " + e.getMessage()); + LOG.error("Failed to find table " + tableName.getNotEmptyDbTable() + " got exception " + e.getMessage()); throw new SemanticException(e.getMessage(), e); } return mTable; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 0198c0f724..5a5ca96cfe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -74,6 +74,7 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StatsSetupConst.StatDB; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; @@ -2275,9 +2276,9 @@ private void getMetaData(QB qb, ReadEntity parentInput) // Whether we are using an acid compliant transaction manager has already been caught in // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid // here, it means the table itself doesn't support it. - throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, ts.tableName); + throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, ts.getTableName().getTable()); } else { - throw new SemanticException(ErrorMsg.ACID_OP_ON_INSERTONLYTRAN_TABLE, ts.tableName); + throw new SemanticException(ErrorMsg.ACID_OP_ON_INSERTONLYTRAN_TABLE, ts.getTableName().getTable()); } } // TableSpec ts is got from the query (user specified), @@ -2296,7 +2297,7 @@ private void getMetaData(QB qb, ReadEntity parentInput) } if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { // Add the table spec for the destination table. - qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts); + qb.getParseInfo().addTableSpec(ts.getTableName().getTable().toLowerCase(), ts); } break; } @@ -2304,8 +2305,8 @@ private void getMetaData(QB qb, ReadEntity parentInput) case HiveParser.TOK_DIR: { // This is a dfs file String fname = stripQuotes(ast.getChild(0).getText()); - if ((!qb.getParseInfo().getIsSubQ()) - && (((ASTNode) ast.getChild(0)).getToken().getType() == HiveParser.TOK_TMP_FILE)) { + if ((!qb.getParseInfo().getIsSubQ()) && (((ASTNode) ast.getChild(0)).getToken().getType() + == HiveParser.TOK_TMP_FILE)) { if (qb.isCTAS() || qb.isMaterializedView()) { qb.setIsQuery(false); @@ -2330,27 +2331,25 @@ private void getMetaData(QB qb, ReadEntity parentInput) location = wh.getDatabasePath(db.getDatabase(destTableDb)); } catch (MetaException e) { throw new SemanticException(e); - } - } - try { - CreateTableDesc tblDesc = qb.getTableDesc(); - if (tblDesc != null - && tblDesc.isTemporary() - && AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) { - fname = FileUtils.makeQualified(location, conf).toString(); - } else { - fname = ctx.getExtTmpPathRelTo( - FileUtils.makeQualified(location, conf)).toString(); - } - } catch (Exception e) { - throw new SemanticException(generateErrorMessage(ast, - "Error creating temporary folder on: " + location.toString()), e); } - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - TableSpec ts = new TableSpec(db, conf, this.ast); - // Add the table spec for the destination table. 
- qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts); + } + try { + CreateTableDesc tblDesc = qb.getTableDesc(); + if (tblDesc != null && tblDesc.isTemporary() && AcidUtils + .isInsertOnlyTable(tblDesc.getTblProps(), true)) { + fname = FileUtils.makeQualified(location, conf).toString(); + } else { + fname = ctx.getExtTmpPathRelTo(FileUtils.makeQualified(location, conf)).toString(); } + } catch (Exception e) { + throw new SemanticException( + generateErrorMessage(ast, "Error creating temporary folder on: " + location.toString()), e); + } + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + TableSpec ts = new TableSpec(db, conf, this.ast); + // Add the table spec for the destination table. + qb.getParseInfo().addTableSpec(ts.getTableName().getTable().toLowerCase(), ts); + } } else { // This is the only place where isQuery is set to true; it defaults to false. qb.setIsQuery(true); @@ -7029,8 +7028,7 @@ private Operator genMaterializedViewDataOrgPlan(List sortColInfos, L } private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException { - String qTableName = DDLSemanticAnalyzer.getDotName(new String[] { dbName, - tableName }); + TableName qTableName = HiveTableName.ofNullable(tableName, dbName); Map mapProp = new HashMap<>(); mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null); AlterTableUnsetPropertiesDesc alterTblDesc = new AlterTableUnsetPropertiesDesc(qTableName, null, null, false, @@ -7568,8 +7566,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) List fileSinkColInfos = null; List sortColInfos = null; List distributeColInfos = null; - String dbName = null; - String tableName = null; + TableName tableName = null; Map tblProps = null; CreateTableDesc tblDesc = qb.getTableDesc(); CreateViewDesc viewDesc = qb.getViewDesc(); @@ -7580,15 +7577,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) fileSinkColInfos = new ArrayList<>(); destTableIsTemporary = tblDesc.isTemporary(); destTableIsMaterialization = tblDesc.isMaterialization(); - dbName = tblDesc.getDatabaseName(); - tableName = tblDesc.getTableName(); - // CreateTableDesc stores table name as db.table. So, need to decode it before allocating - // write id. 
- if (tableName.contains(".")) { - String[] names = Utilities.getDbTableName(tableName); - dbName = names[0]; - tableName = names[1]; - } + tableName = TableName.fromString(tblDesc.getDbTableName(), null, tblDesc.getDatabaseName()); tblProps = tblDesc.getTblProps(); } else if (viewDesc != null) { fieldSchemas = new ArrayList<>(); @@ -7603,9 +7592,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) distributeColInfos = new ArrayList<>(); destTableIsTemporary = false; destTableIsMaterialization = false; - String[] names = Utilities.getDbTableName(viewDesc.getViewName()); - dbName = names[0]; - tableName = names[1]; + tableName = HiveTableName.ofNullableWithNoDefault(viewDesc.getViewName()); tblProps = viewDesc.getTblProps(); } @@ -7614,7 +7601,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) if (ctx.getExplainConfig() != null) { writeId = 0L; // For explain plan, txn won't be opened and doesn't make sense to allocate write id } else { - writeId = txnMgr.getTableWriteId(dbName, tableName); + writeId = txnMgr.getTableWriteId(tableName.getDb(), tableName.getTable()); } } catch (LockException ex) { throw new SemanticException("Failed to allocate write Id", ex); @@ -8167,7 +8154,7 @@ private void handleLineage(LoadTableDesc ltd, Operator output) } else if ( queryState.getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) { Path tlocation = null; - String tName = Utilities.getDbTableName(tableDesc.getTableName())[1]; + String tName = Utilities.getDbTableName(tableDesc.getDbTableName())[1]; try { Warehouse wh = new Warehouse(conf); tlocation = wh.getDefaultTablePath(db.getDatabase(tableDesc.getDatabaseName()), @@ -13311,8 +13298,8 @@ private boolean hasConstraints(final List partCols, final List cols = new ArrayList(); @@ -13554,9 +13541,11 @@ ASTNode analyzeCreateTable( } tblProps = validateAndAddDefaultProperties( tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); + addDbAndTabToOutputs(new String[] {qualifiedTabName.getDb(), qualifiedTabName.getTable()}, + TableType.MANAGED_TABLE, isTemporary, tblProps); - CreateTableDesc crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, + CreateTableDesc crtTblDesc = new CreateTableDesc(qualifiedTabName, + isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, @@ -13576,14 +13565,15 @@ ASTNode analyzeCreateTable( case ctt: // CREATE TRANSACTIONAL TABLE if (isExt) { throw new SemanticException( - qualifiedTabName[1] + " cannot be declared transactional because it's an external table"); + qualifiedTabName.getTable() + " cannot be declared transactional because it's an external table"); } tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, false, tblProps); + addDbAndTabToOutputs(new String[] {qualifiedTabName.getDb(), qualifiedTabName.getTable()}, + TableType.MANAGED_TABLE, false, tblProps); CreateTableDesc crtTranTblDesc = - new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, + new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partCols, 
bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), @@ -13602,7 +13592,8 @@ ASTNode analyzeCreateTable( tblProps = validateAndAddDefaultProperties( tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); + addDbAndTabToOutputs(new String[] {qualifiedTabName.getDb(), qualifiedTabName.getTable()}, + TableType.MANAGED_TABLE, isTemporary, tblProps); Table likeTable = getTable(likeTableName, false); if (likeTable != null) { @@ -13629,17 +13620,15 @@ ASTNode analyzeCreateTable( if (isTemporary) { if (!ctx.isExplainSkipExecution() && !isMaterialization) { - String dbName = qualifiedTabName[0]; - String tblName = qualifiedTabName[1]; SessionState ss = SessionState.get(); if (ss == null) { throw new SemanticException("No current SessionState, cannot create temporary table " - + dbName + "." + tblName); + + qualifiedTabName.getNotEmptyDbTable()); } Map tables = SessionHiveMetaStoreClient. - getTempTablesForDatabase(dbName, tblName); - if (tables != null && tables.containsKey(tblName)) { - throw new SemanticException("Temporary table " + dbName + "." + tblName + getTempTablesForDatabase(qualifiedTabName.getDb(), qualifiedTabName.getTable()); + if (tables != null && tables.containsKey(qualifiedTabName.getTable())) { + throw new SemanticException("Temporary table " + qualifiedTabName.getNotEmptyDbTable() + " already exists"); } } @@ -13692,8 +13681,9 @@ ASTNode analyzeCreateTable( tblProps = validateAndAddDefaultProperties( tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); - tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, + addDbAndTabToOutputs(new String[] {qualifiedTabName.getDb(), qualifiedTabName.getTable()}, + TableType.MANAGED_TABLE, isTemporary, tblProps); + tableDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partColNames, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), @@ -13728,8 +13718,8 @@ private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type, } protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException { - String[] qualTabName = getQualifiedTableName((ASTNode) ast.getChild(0)); - String dbDotTable = getDotName(qualTabName); + TableName qualTabName = getQualifiedTableName((ASTNode) ast.getChild(0)); + final String dbDotTable = qualTabName.getNotEmptyDbTable(); List cols = null; boolean ifNotExists = false; boolean rewriteEnabled = true; @@ -13881,7 +13871,8 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps()); - addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW, false, tblProps); + addDbAndTabToOutputs(new String[] {qualTabName.getDb(), 
qualTabName.getTable()}, TableType.MATERIALIZED_VIEW, + false, tblProps); queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW); } else { createVwDesc = new CreateViewDesc( @@ -13889,7 +13880,8 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), storageFormat.getSerde()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), createVwDesc))); - addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW, false, tblProps); + addDbAndTabToOutputs(new String[] {qualTabName.getDb(), qualTabName.getTable()}, + TableType.VIRTUAL_VIEW, false, tblProps); queryState.setCommandType(HiveOperation.CREATEVIEW); } qb.setViewDesc(createVwDesc); @@ -15297,15 +15289,14 @@ public boolean isValidQueryCaching() { */ protected String getFullTableNameForSQL(ASTNode n) throws SemanticException { switch (n.getType()) { - case HiveParser.TOK_TABNAME: - String[] tableName = getQualifiedTableName(n); - return getDotName(new String[] { - HiveUtils.unparseIdentifier(tableName[0], this.conf), - HiveUtils.unparseIdentifier(tableName[1], this.conf) }); - case HiveParser.TOK_TABREF: - return getFullTableNameForSQL((ASTNode) n.getChild(0)); - default: - throw raiseWrongType("TOK_TABNAME", n); + case HiveParser.TOK_TABNAME: + TableName tableName = getQualifiedTableName(n); + return HiveTableName.ofNullable(HiveUtils.unparseIdentifier(tableName.getTable(), this.conf), + HiveUtils.unparseIdentifier(tableName.getDb(), this.conf)).getNotEmptyDbTable(); + case HiveParser.TOK_TABREF: + return getFullTableNameForSQL((ASTNode) n.getChild(0)); + default: + throw raiseWrongType("TOK_TABNAME", n); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index ec46280627..36fb93f891 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -69,7 +69,6 @@ import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; -import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -87,7 +86,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Serializable; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -482,7 +480,7 @@ private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticExce String protoName = null; boolean isExternal = false; if (pCtx.getQueryProperties().isCTAS()) { - protoName = pCtx.getCreateTable().getTableName(); + protoName = pCtx.getCreateTable().getDbTableName(); isExternal = pCtx.getCreateTable().isExternal(); } else if (pCtx.getQueryProperties().isMaterializedView()) { protoName = pCtx.getCreateViewDesc().getViewName(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index c2353c5def..d54325d810 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -251,8 +251,7 @@ protected 
PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticExcept ASTNode gchild = (ASTNode)child.getChild(0); if (child.getType() == HiveParser.TOK_TABLE_TYPE) { isTable = true; - String[] qualified = BaseSemanticAnalyzer.getQualifiedTableName(gchild); - object = BaseSemanticAnalyzer.getDotName(qualified); + object = BaseSemanticAnalyzer.getQualifiedTableName(gchild).getNotEmptyDbTable(); } else if (child.getType() == HiveParser.TOK_URI_TYPE || child.getType() == HiveParser.TOK_SERVER_TYPE) { throw new SemanticException("Hive authorization does not support the URI or SERVER objects"); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java index 56850417dd..01b7fdc4b6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java @@ -152,7 +152,7 @@ private void writeData(PartitionIterable partitions) throws SemanticException { if (tableSpec.tableHandle.isPartitioned()) { if (partitions == null) { throw new IllegalStateException("partitions cannot be null for partitionTable :" - + tableSpec.tableName); + + tableSpec.getTableName().getTable()); } new PartitionExport(paths, partitions, distCpDoAsUser, conf, mmCtx).write(replicationSpec); } else { @@ -316,7 +316,7 @@ public AuthEntities getAuthEntities() throws SemanticException { if (tableSpec.tableHandle.isPartitioned()) { if (partitions == null) { throw new IllegalStateException("partitions cannot be null for partitionTable :" - + tableSpec.tableName); + + tableSpec.getTableName().getTable()); } for (Partition partition : partitions) { authEntities.inputs.add(new ReadEntity(partition)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java index d8ed9e2d2f..08436e4129 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; @@ -53,8 +54,9 @@ return tasks; } - String actualDbName = context.isDbNameEmpty() ? fks.get(0).getFktable_db() : context.dbName; - String actualTblName = fks.get(0).getFktable_name(); + final String actualDbName = context.isDbNameEmpty() ? fks.get(0).getFktable_db() : context.dbName; + final String actualTblName = fks.get(0).getFktable_name(); + final TableName tName = TableName.fromString(actualTblName, null, actualDbName); for (SQLForeignKey fk : fks) { // If parent table is in the same database, change it to the actual db on destination @@ -67,7 +69,7 @@ } Constraints constraints = new Constraints(null, fks, null, null, null, null); - AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." 
+ actualTblName, + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java index 39f896ffcc..eb44fdba7f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; @@ -53,8 +54,9 @@ return tasks; } - String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName; - String actualTblName = nns.get(0).getTable_name(); + final String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName; + final String actualTblName = nns.get(0).getTable_name(); + final TableName tName = TableName.fromString(actualTblName, null, actualDbName); for (SQLNotNullConstraint nn : nns) { nn.setTable_db(actualDbName); @@ -62,7 +64,7 @@ } Constraints constraints = new Constraints(null, null, nns, null, null, null); - AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." + actualTblName, + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java index 5bfced0398..1f704c2893 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; @@ -53,8 +54,9 @@ return tasks; } - String actualDbName = context.isDbNameEmpty() ? pks.get(0).getTable_db() : context.dbName; - String actualTblName = pks.get(0).getTable_name(); + final String actualDbName = context.isDbNameEmpty() ? pks.get(0).getTable_db() : context.dbName; + final String actualTblName = pks.get(0).getTable_name(); + final TableName tName = TableName.fromString(actualTblName, null, actualDbName); for (SQLPrimaryKey pk : pks) { pk.setTable_db(actualDbName); @@ -62,7 +64,7 @@ } Constraints constraints = new Constraints(pks, null, null, null, null, null); - AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." 
+ actualTblName, + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java index 9cf5ffaa74..1da7a31822 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; @@ -53,8 +54,9 @@ return tasks; } - String actualDbName = context.isDbNameEmpty() ? uks.get(0).getTable_db() : context.dbName; - String actualTblName = uks.get(0).getTable_name(); + final String actualDbName = context.isDbNameEmpty() ? uks.get(0).getTable_db() : context.dbName; + final String actualTblName = uks.get(0).getTable_name(); + final TableName tName = TableName.fromString(actualTblName, null, actualDbName); for (SQLUniqueConstraint uk : uks) { uk.setTable_db(actualDbName); @@ -62,7 +64,7 @@ } Constraints constraints = new Constraints(null, null, null, uks, null, null); - AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(actualDbName + "." + actualTblName, + AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java index 0db9f190fa..e37fc149a5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.table.constaint.AlterTableDropConstraintDesc; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.SemanticException; import java.io.Serializable; @@ -33,12 +35,13 @@ public List> handle(Context context) throws SemanticException { DropConstraintMessage msg = deserializer.getDropConstraintMessage(context.dmd.getPayload()); - String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; - String actualTblName = msg.getTable(); + final String actualDbName = context.isDbNameEmpty() ? 
msg.getDB() : context.dbName; + final String actualTblName = msg.getTable(); + final TableName tName = HiveTableName.ofNullable(actualDbName, actualTblName); String constraintName = msg.getConstraint(); - AlterTableDropConstraintDesc dropConstraintsDesc = new AlterTableDropConstraintDesc( - actualDbName + "." + actualTblName, context.eventOnlyReplicationSpec(), constraintName); + AlterTableDropConstraintDesc dropConstraintsDesc = + new AlterTableDropConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraintName); Task dropConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc), context.hiveConf); context.log.debug("Added drop constrain task : {}:{}", dropConstraintsTask.getId(), actualTblName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java index f65559706b..e650f52aa2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; @@ -43,8 +44,9 @@ Map> partSpecs = ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions()); if (partSpecs.size() > 0) { - AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(actualDbName + "." + actualTblName, - partSpecs, true, context.eventOnlyReplicationSpec()); + AlterTableDropPartitionDesc dropPtnDesc = + new AlterTableDropPartitionDesc(HiveTableName.ofNullable(actualTblName, actualDbName), partSpecs, true, + context.eventOnlyReplicationSpec()); Task dropPtnTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf ); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java index 6dd69767fe..c936840d31 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; @@ -45,7 +46,7 @@ Map newPartSpec = new LinkedHashMap<>(); Map oldPartSpec = new LinkedHashMap<>(); - String tableName = actualDbName + "." 
+ actualTblName; + TableName tableName = TableName.fromString(actualTblName, null, actualDbName); Table tableObj; ReplicationSpec replicationSpec = context.eventOnlyReplicationSpec(); try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java index c810b8c517..82e50ff442 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; @@ -26,7 +27,6 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.stats.StatsUtils; import java.io.Serializable; import java.util.List; @@ -55,18 +55,19 @@ } } - String oldName = StatsUtils.getFullyQualifiedTableName(oldDbName, tableObjBefore.getTableName()); - String newName = StatsUtils.getFullyQualifiedTableName(newDbName, tableObjAfter.getTableName()); + TableName oldName = TableName.fromString(tableObjBefore.getTableName(), null, oldDbName); + TableName newName = TableName.fromString(tableObjAfter.getTableName(), null, newDbName); ReplicationSpec replicationSpec = context.eventOnlyReplicationSpec(); if (ReplUtils.isTableMigratingToTransactional(context.hiveConf, tableObjAfter)) { replicationSpec.setMigratingToTxnTable(); } - AlterTableRenameDesc renameTableDesc = new AlterTableRenameDesc(oldName, replicationSpec, false, newName); + AlterTableRenameDesc renameTableDesc = + new AlterTableRenameDesc(oldName, replicationSpec, false, newName.getNotEmptyDbTable()); renameTableDesc.setWriteId(msg.getWriteId()); Task renameTableTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, renameTableDesc), context.hiveConf); context.log.debug("Added rename table task : {}:{}->{}", - renameTableTask.getId(), oldName, newName); + renameTableTask.getId(), oldName.getNotEmptyDbTable(), newName.getNotEmptyDbTable()); // oldDbName and newDbName *will* be the same if we're here updatedMetadata.set(context.dmd.getEventTo().toString(), newDbName, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java index 1b1efbcd8f..25e524af37 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; @@ -36,8 +37,8 @@ @Override public List> handle(Context context) throws SemanticException { AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload()); - String actualDbName = context.isDbNameEmpty() ? 
msg.getDB() : context.dbName; - String actualTblName = msg.getTable(); + final TableName tName = TableName.fromString(msg.getTable(), null, + context.isDbNameEmpty() ? msg.getDB() : context.dbName); Map partSpec = new LinkedHashMap<>(); org.apache.hadoop.hive.metastore.api.Table tblObj; @@ -56,17 +57,17 @@ } TruncateTableDesc truncateTableDesc = new TruncateTableDesc( - actualDbName + "." + actualTblName, partSpec, + tName, partSpec, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); Task truncatePtnTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); - updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, partSpec); + updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), partSpec); try { - return ReplUtils.addOpenTxnTaskForMigration(actualDbName, actualTblName, + return ReplUtils.addOpenTxnTaskForMigration(tName.getDb(), tName.getTable(), context.hiveConf, updatedMetadata, truncatePtnTask, tblObj); } catch (Exception e) { throw new SemanticException(e.getMessage()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java index c18529fb2d..35b8e0e684 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.table.misc.TruncateTableDesc; @@ -32,22 +33,20 @@ @Override public List> handle(Context context) throws SemanticException { AlterTableMessage msg = deserializer.getAlterTableMessage(context.dmd.getPayload()); - String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; - String actualTblName = msg.getTable(); + final TableName tName = TableName.fromString(msg.getTable(), null, + context.isDbNameEmpty() ? msg.getDB() : context.dbName); - TruncateTableDesc truncateTableDesc = new TruncateTableDesc( - actualDbName + "." 
+ actualTblName, - null, context.eventOnlyReplicationSpec()); + TruncateTableDesc truncateTableDesc = new TruncateTableDesc(tName, null, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); Task truncateTableTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); - updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); + updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), null); try { - return ReplUtils.addOpenTxnTaskForMigration(actualDbName, actualTblName, + return ReplUtils.addOpenTxnTaskForMigration(tName.getDb(), tName.getTable(), context.hiveConf, updatedMetadata, truncateTableTask, msg.getTableObjBefore()); } catch (Exception e) { throw new SemanticException(e.getMessage()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java index 40def601e6..8fa50a810a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java @@ -195,9 +195,9 @@ public String getTableName() { if (work.getLoadTableDesc() != null) { return work.getLoadTableDesc().getTable().getTableName(); } else if (work.getTableSpecs() != null) { - return work.getTableSpecs().tableName; + return work.getTableSpecs().getTableName().getTable(); } else if (getLoadFileDesc().getCtasCreateTableDesc() != null) { - return getLoadFileDesc().getCtasCreateTableDesc().getTableName(); + return getLoadFileDesc().getCtasCreateTableDesc().getDbTableName(); } else { return getLoadFileDesc().getCreateViewDesc().getViewName(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java index ffb81b54b9..caa22a03b4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java @@ -107,7 +107,7 @@ public MmContext getMmContext() { */ public void acidPostProcess(Hive db) throws HiveException { if (acidFqTableName != null) { - LOG.info("Swapping export of " + tableSpec.tableName + " to " + acidFqTableName + + LOG.info("Swapping export of " + tableSpec.getTableName().getTable() + " to " + acidFqTableName + " using partSpec=" + tableSpec.partSpec); tableSpec = new TableSpec(db, acidFqTableName, tableSpec.partSpec, true); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 09d8089c5c..7f8242c69f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -18,14 +18,12 @@ package org.apache.hadoop.hive.ql.plan; -import java.io.Serializable; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import com.google.common.collect.ImmutableSet; -import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -35,12 +33,11 @@ import org.apache.hadoop.hive.ql.ddl.view.create.CreateViewDesc; import org.apache.hadoop.hive.ql.exec.Task; 
import org.apache.hadoop.hive.ql.exec.TaskFactory; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -59,44 +56,43 @@ public ImportTableDesc(String dbName, Table table) throws Exception { this.dbName = dbName; this.table = table; + final TableName tableName = HiveTableName.ofNullable(table.getTableName(), dbName); switch (getDescType()) { - case TABLE: - this.createTblDesc = new CreateTableDesc(dbName, - table.getTableName(), - false, // isExternal: set to false here, can be overwritten by the IMPORT stmt - false, - table.getSd().getCols(), - table.getPartitionKeys(), - table.getSd().getBucketCols(), - table.getSd().getSortCols(), - table.getSd().getNumBuckets(), - null, null, null, null, null, // these 5 delims passed as serde params - null, // comment passed as table params - table.getSd().getInputFormat(), - table.getSd().getOutputFormat(), - null, // location: set to null here, can be overwritten by the IMPORT stmt - table.getSd().getSerdeInfo().getSerializationLib(), - null, // storagehandler passed as table params - table.getSd().getSerdeInfo().getParameters(), - table.getParameters(), false, - (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo() - .getSkewedColNames(), - (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo() - .getSkewedColValues(), - null, - null, - null, - null, - null, - null, - table.getColStats(), - table.getTTable().getWriteId()); - this.createTblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories()); - break; - case VIEW: - String[] qualViewName = { dbName, table.getTableName() }; - String dbDotView = BaseSemanticAnalyzer.getDotName(qualViewName); + case TABLE: + this.createTblDesc = new CreateTableDesc(tableName, + false, // isExternal: set to false here, can be overwritten by the IMPORT stmt + false, + table.getSd().getCols(), + table.getPartitionKeys(), + table.getSd().getBucketCols(), + table.getSd().getSortCols(), + table.getSd().getNumBuckets(), + null, null, null, null, null, // these 5 delims passed as serde params + null, // comment passed as table params + table.getSd().getInputFormat(), + table.getSd().getOutputFormat(), + null, // location: set to null here, can be overwritten by the IMPORT stmt + table.getSd().getSerdeInfo().getSerializationLib(), + null, // storagehandler passed as table params + table.getSd().getSerdeInfo().getParameters(), + table.getParameters(), false, + (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo() + .getSkewedColNames(), + (null == table.getSd().getSkewedInfo()) ? 
null : table.getSd().getSkewedInfo() + .getSkewedColValues(), + null, + null, + null, + null, + null, + null, + table.getColStats(), + table.getTTable().getWriteId()); + this.createTblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories()); + break; + case VIEW: + final String dbDotView = tableName.getNotEmptyDbTable(); if (table.isMaterializedView()) { this.createViewDesc = new CreateViewDesc(dbDotView, table.getAllCols(), @@ -201,27 +197,23 @@ public String getLocation() { return null; } - public void setTableName(String tableName) throws SemanticException { + public void setTableName(TableName tableName) throws SemanticException { switch (getDescType()) { - case TABLE: - createTblDesc.setTableName(tableName); - break; - case VIEW: - String[] qualViewName = { dbName, tableName }; - String dbDotView = BaseSemanticAnalyzer.getDotName(qualViewName); - createViewDesc.setViewName(dbDotView); - break; + case TABLE: + createTblDesc.setTableName(tableName); + break; + case VIEW: + createViewDesc.setViewName(tableName.getNotEmptyDbTable()); + break; } } public String getTableName() throws SemanticException { switch (getDescType()) { case TABLE: - return createTblDesc.getTableName(); + return createTblDesc.getTableName().getTable(); case VIEW: - String dbDotView = createViewDesc.getViewName(); - String[] names = Utilities.getDbTableName(dbDotView); - return names[1]; // names[0] have the Db name and names[1] have the view name + return TableName.fromString(createViewDesc.getViewName(), null, null).getTable(); } return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index e20f6956b2..b53635d6a2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -368,9 +368,9 @@ public static TableDesc getTableDesc(CreateTableDesc crtTblDesc, String cols, crtTblDesc.getNullFormat()); } - if (crtTblDesc.getTableName() != null && crtTblDesc.getDatabaseName() != null) { + if (crtTblDesc.getDbTableName() != null && crtTblDesc.getDatabaseName() != null) { properties.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME, - crtTblDesc.getTableName()); + crtTblDesc.getDbTableName()); } if (crtTblDesc.getTblProps() != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index be527095c3..6837cda61b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -1765,10 +1765,12 @@ public static long getDataSizeFromColumnStats(long numRows, List return result; } + @Deprecated public static String getFullyQualifiedTableName(String dbName, String tabName) { return getFullyQualifiedName(dbName, tabName); } + @Deprecated private static String getFullyQualifiedName(String... 
names) { List nonNullAndEmptyNames = Lists.newArrayList(); for (String name : names) { diff --git a/ql/src/test/results/clientnegative/delete_non_acid_table.q.out b/ql/src/test/results/clientnegative/delete_non_acid_table.q.out index 19fd5fb426..dafac6d7df 100644 --- a/ql/src/test/results/clientnegative/delete_non_acid_table.q.out +++ b/ql/src/test/results/clientnegative/delete_non_acid_table.q.out @@ -34,4 +34,4 @@ POSTHOOK: Input: default@not_an_acid_table2 -1070883071 0ruyd6Y50JpdGRf6HqD -1070551679 iUR3Q -1069736047 k17Am8uPHWk02cEf1jet -FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table default.not_an_acid_table2 that is not transactional +FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table not_an_acid_table2 that is not transactional diff --git a/ql/src/test/results/clientnegative/desc_failure1.q.out b/ql/src/test/results/clientnegative/desc_failure1.q.out index ca54088df5..29ed890c69 100644 --- a/ql/src/test/results/clientnegative/desc_failure1.q.out +++ b/ql/src/test/results/clientnegative/desc_failure1.q.out @@ -1 +1 @@ -FAILED: SemanticException [Error 10001]: Table not found NonExistentTable +FAILED: SemanticException [Error 10001]: Table not found default.NonExistentTable diff --git a/ql/src/test/results/clientnegative/exchange_partition_neg_table_missing.q.out b/ql/src/test/results/clientnegative/exchange_partition_neg_table_missing.q.out index 9f1c87a669..63a6987a68 100644 --- a/ql/src/test/results/clientnegative/exchange_partition_neg_table_missing.q.out +++ b/ql/src/test/results/clientnegative/exchange_partition_neg_table_missing.q.out @@ -1 +1 @@ -FAILED: SemanticException [Error 10001]: Table not found t1 +FAILED: SemanticException [Error 10001]: Table not found default.t1 diff --git a/ql/src/test/results/clientnegative/merge_negative_2.q.out b/ql/src/test/results/clientnegative/merge_negative_2.q.out index 04113ab152..0e8ca7782a 100644 --- a/ql/src/test/results/clientnegative/merge_negative_2.q.out +++ b/ql/src/test/results/clientnegative/merge_negative_2.q.out @@ -16,4 +16,4 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@srcpart2@ds=2011 POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: source table default.srcpart2 is partitioned but no partition desc found. +FAILED: SemanticException source table hive.default.srcpart2 is partitioned but no partition desc found. 
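The HiveTableName helpers added above and the repl-load message handlers all funnel name resolution through TableName.fromString(name, defaultCatalog, defaultDatabase). The following is an illustrative sketch only, not part of this patch, of that resolution contract; it uses only methods the patch itself calls, and the catalog, database and table literals are placeholder values.

import org.apache.hadoop.hive.common.TableName;

public class TableNameResolutionSketch {
  public static void main(String[] args) {
    // An unqualified name picks up the supplied default catalog and database.
    TableName bare = TableName.fromString("t1", "hive", "default");
    System.out.println(bare.getDb() + " / " + bare.getTable());   // expected: default / t1
    System.out.println(bare.getNotEmptyDbTable());                 // expected: default.t1

    // A db-qualified name keeps its own database and ignores the default.
    TableName qualified = TableName.fromString("db1.t1", "hive", "default");
    System.out.println(qualified.getNotEmptyDbTable());            // expected: db1.t1

    // Per the HiveTableName javadoc above, a null name is rejected with an
    // IllegalArgumentException, which the ql-side wrappers rethrow as SemanticException.
    try {
      TableName.fromString(null, "hive", "default");
    } catch (IllegalArgumentException e) {
      System.out.println("rejected null name: " + e.getMessage());
    }
  }
}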
diff --git a/ql/src/test/results/clientnegative/merge_negative_3.q.out b/ql/src/test/results/clientnegative/merge_negative_3.q.out index 02c2ad19d0..60883a925c 100644 --- a/ql/src/test/results/clientnegative/merge_negative_3.q.out +++ b/ql/src/test/results/clientnegative/merge_negative_3.q.out @@ -16,4 +16,4 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@srcpart2@ds=2011 POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: Concatenate/Merge can not be performed on bucketed tables +FAILED: SemanticException [Error 30031]: Concatenate/Merge can not be performed on bucketed tables diff --git a/ql/src/test/results/clientnegative/merge_negative_4.q.out b/ql/src/test/results/clientnegative/merge_negative_4.q.out index 975422e2f8..d9eb2219bd 100644 --- a/ql/src/test/results/clientnegative/merge_negative_4.q.out +++ b/ql/src/test/results/clientnegative/merge_negative_4.q.out @@ -16,4 +16,4 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@srcpart2@ds=2011 POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: Concatenate/Merge can not be performed on bucketed tables +FAILED: SemanticException [Error 30031]: Concatenate/Merge can not be performed on bucketed tables diff --git a/ql/src/test/results/clientnegative/mm_delete.q.out b/ql/src/test/results/clientnegative/mm_delete.q.out index ed7bafba98..d0fd905673 100644 --- a/ql/src/test/results/clientnegative/mm_delete.q.out +++ b/ql/src/test/results/clientnegative/mm_delete.q.out @@ -65,4 +65,4 @@ POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@mm_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Lineage: mm_srcpart PARTITION(ds=2008-04-08,hr=11).key SCRIPT [] POSTHOOK: Lineage: mm_srcpart PARTITION(ds=2008-04-08,hr=11).value SCRIPT [] -FAILED: SemanticException [Error 10414]: Attempt to do update or delete on table default.mm_srcpart that is insert-only transactional +FAILED: SemanticException [Error 10414]: Attempt to do update or delete on table mm_srcpart that is insert-only transactional diff --git a/ql/src/test/results/clientnegative/mm_update.q.out b/ql/src/test/results/clientnegative/mm_update.q.out index 946ffd1598..528d16269f 100644 --- a/ql/src/test/results/clientnegative/mm_update.q.out +++ b/ql/src/test/results/clientnegative/mm_update.q.out @@ -55,4 +55,4 @@ POSTHOOK: Input: default@mm_srcpart@ds=2008-04-09/hr=11 2008-04-09 11 43 val_43 2008-04-09 11 413 val_413 2008-04-09 11 413 val_413 -FAILED: SemanticException [Error 10414]: Attempt to do update or delete on table default.mm_srcpart that is insert-only transactional +FAILED: SemanticException [Error 10414]: Attempt to do update or delete on table mm_srcpart that is insert-only transactional diff --git a/ql/src/test/results/clientnegative/update_non_acid_table.q.out b/ql/src/test/results/clientnegative/update_non_acid_table.q.out index 02946fc185..64164ba4ed 100644 --- a/ql/src/test/results/clientnegative/update_non_acid_table.q.out +++ b/ql/src/test/results/clientnegative/update_non_acid_table.q.out @@ -34,4 
+34,4 @@ POSTHOOK: Input: default@not_an_acid_table -1070883071 0ruyd6Y50JpdGRf6HqD -1070551679 iUR3Q -1069736047 k17Am8uPHWk02cEf1jet -FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table default.not_an_acid_table that is not transactional +FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table not_an_acid_table that is not transactional diff --git a/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out b/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out index d4ac7f1468..d1905e24c7 100644 --- a/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out +++ b/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out @@ -24,10 +24,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Change Column + table name: default.t1_n14 new column name: c1 new column type: smallint old column name: c1 - table name: default.t1_n14 PREHOOK: query: alter table t1_n14 change column c1 c1 smallint PREHOOK: type: ALTERTABLE_RENAMECOL diff --git a/ql/src/test/results/clientpositive/alter_rename_table.q.out b/ql/src/test/results/clientpositive/alter_rename_table.q.out index 15654d2120..f517132430 100644 --- a/ql/src/test/results/clientpositive/alter_rename_table.q.out +++ b/ql/src/test/results/clientpositive/alter_rename_table.q.out @@ -130,8 +130,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Rename Table - new table name: target.src table name: source.src + new table name: target.src PREHOOK: query: ALTER TABLE source.src RENAME TO target.src PREHOOK: type: ALTERTABLE_RENAME @@ -176,8 +176,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Rename Table - new table name: target.srcpart table name: source.srcpart + new table name: target.srcpart PREHOOK: query: ALTER TABLE source.srcpart RENAME TO target.srcpart PREHOOK: type: ALTERTABLE_RENAME diff --git a/ql/src/test/results/clientpositive/ambiguitycheck.q.out b/ql/src/test/results/clientpositive/ambiguitycheck.q.out index efbd0d8947..3de3d68118 100644 --- a/ql/src/test/results/clientpositive/ambiguitycheck.q.out +++ b/ql/src/test/results/clientpositive/ambiguitycheck.q.out @@ -829,8 +829,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Describe Table -#### A masked pattern was here #### table: default.src +#### A masked pattern was here #### Stage: Stage-1 Fetch Operator diff --git a/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out b/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out index 3c6e3ea8ab..65b2090a4c 100644 --- a/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out +++ b/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out @@ -329,9 +329,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Update Columns + table name: default.avro_extschema_url_parted partition: p1 2018 - table name: default.avro_extschema_url_parted PREHOOK: query: ALTER TABLE avro_extschema_url_parted PARTITION (p1=2018) UPDATE COLUMNS PREHOOK: type: ALTERTABLE_UPDATECOLUMNS diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out index 8e5a013379..dba97ccd42 100644 --- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out +++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out @@ -149,7 +149,6 @@ FROM cmv_basetable_n2 WHERE a = 3 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable_n2 -PREHOOK: Input: default@cmv_mat_view2_n0 PREHOOK: Output: hdfs://### HDFS PATH ### POSTHOOK: 
query: EXPLAIN SELECT a, c @@ -157,7 +156,6 @@ FROM cmv_basetable_n2 WHERE a = 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable_n2 -POSTHOOK: Input: default@cmv_mat_view2_n0 POSTHOOK: Output: hdfs://### HDFS PATH ### STAGE DEPENDENCIES: Stage-0 is a root stage @@ -168,35 +166,31 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: cmv_mat_view2_n0 - properties: - druid.fieldNames a,c - druid.fieldTypes int,double - druid.query.json {"queryType":"scan","dataSource":"default.cmv_mat_view2_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["a","c"],"resultFormat":"compactedList"} - druid.query.type scan - Select Operator - expressions: a (type: int), c (type: double) - outputColumnNames: _col0, _col1 - ListSink + alias: cmv_basetable_n2 + filterExpr: (a = 3) (type: boolean) + Filter Operator + predicate: (a = 3) (type: boolean) + Select Operator + expressions: 3 (type: int), c (type: double) + outputColumnNames: _col0, _col1 + ListSink PREHOOK: query: SELECT a, c FROM cmv_basetable_n2 WHERE a = 3 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable_n2 -PREHOOK: Input: default@cmv_mat_view2_n0 PREHOOK: Output: hdfs://### HDFS PATH ### POSTHOOK: query: SELECT a, c FROM cmv_basetable_n2 WHERE a = 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable_n2 -POSTHOOK: Input: default@cmv_mat_view2_n0 POSTHOOK: Output: hdfs://### HDFS PATH ### 3 15.8 3 9.8 3 978.76 -Warning: Shuffle Join MERGEJOIN[10][tables = [cmv_mat_view2_n0, $hdt$_0]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: EXPLAIN SELECT * FROM ( (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 @@ -205,7 +199,6 @@ SELECT * FROM ( ON table1.a = table2.a) PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable_n2 -PREHOOK: Input: default@cmv_mat_view2_n0 PREHOOK: Output: hdfs://### HDFS PATH ### POSTHOOK: query: EXPLAIN SELECT * FROM ( @@ -215,7 +208,6 @@ SELECT * FROM ( ON table1.a = table2.a) POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable_n2 -POSTHOOK: Input: default@cmv_mat_view2_n0 POSTHOOK: Output: hdfs://### HDFS PATH ### STAGE DEPENDENCIES: Stage-1 is a root stage @@ -233,11 +225,11 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_n2 - filterExpr: ((a = 3) and (d = 3)) (type: boolean) - Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE + filterExpr: (a = 3) (type: boolean) + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((a = 3) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + predicate: (a = 3) (type: boolean) + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: c (type: double) outputColumnNames: _col0 @@ -251,19 +243,22 @@ STAGE PLANS: Map 3 Map Operator Tree: TableScan - alias: cmv_mat_view2_n0 - properties: - druid.fieldNames a,c - druid.fieldTypes int,double - druid.query.json {"queryType":"scan","dataSource":"default.cmv_mat_view2_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["a","c"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE - value 
expressions: a (type: int), c (type: double) + alias: cmv_basetable_n2 + filterExpr: ((d = 3) and (a = 3)) (type: boolean) + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: ((d = 3) and (a = 3)) (type: boolean) + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: c (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: double) Execution mode: vectorized, llap - LLAP IO: no inputs + LLAP IO: may be used (ACID table) Reducer 2 Execution mode: llap Reduce Operator Tree: @@ -273,15 +268,15 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 63 Basic stats: COMPLETE Column stats: NONE + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), _col1 (type: double), _col0 (type: int), _col2 (type: double) + expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -293,7 +288,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join MERGEJOIN[10][tables = [cmv_mat_view2_n0, $hdt$_0]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN @@ -301,7 +296,6 @@ PREHOOK: query: SELECT * FROM ( ON table1.a = table2.a) PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable_n2 -PREHOOK: Input: default@cmv_mat_view2_n0 PREHOOK: Output: hdfs://### HDFS PATH ### POSTHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 @@ -310,7 +304,6 @@ POSTHOOK: query: SELECT * FROM ( ON table1.a = table2.a) POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable_n2 -POSTHOOK: Input: default@cmv_mat_view2_n0 POSTHOOK: Output: hdfs://### HDFS PATH ### 3 15.8 3 978.76 3 9.8 3 978.76 @@ -367,17 +360,17 @@ STAGE PLANS: TableScan alias: cmv_basetable_n2 filterExpr: (a = 3) (type: boolean) - Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (a = 3) (type: boolean) - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: c (type: double) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE 
Reduce Output Operator sort order: - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: double) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -386,7 +379,7 @@ STAGE PLANS: TableScan alias: cmv_basetable_n2 filterExpr: ((d = 3) and (a = 3)) (type: boolean) - Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((d = 3) and (a = 3)) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -410,14 +403,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -472,9 +465,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.cmv_mat_view2_n0 properties: COLUMN_STATS_ACCURATE - table name: default.cmv_mat_view2_n0 Stage: Stage-4 Materialized View Update @@ -497,10 +490,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_n2 - filterExpr: ((a = 3) and (ROW__ID.writeid > 1L)) (type: boolean) - Statistics: Num rows: 7 Data size: 1652 Basic stats: COMPLETE Column stats: COMPLETE + filterExpr: ((ROW__ID.writeid > 1L) and (a = 3)) (type: boolean) + Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((a = 3) and (ROW__ID.writeid > 1L)) (type: boolean) + predicate: ((ROW__ID.writeid > 1L) and (a = 3)) (type: boolean) Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: CAST( t AS timestamp with local time zone) (type: timestamp with local time zone), 3 (type: int), b (type: varchar(256)), c (type: double), userid (type: varchar(256)) @@ -587,7 +580,7 @@ rawDataSize 0 storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler totalSize 0 #### A masked pattern was here #### -Warning: Shuffle Join MERGEJOIN[10][tables = [cmv_mat_view2_n0, $hdt$_0]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: EXPLAIN SELECT * FROM ( (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 @@ -596,7 +589,6 @@ SELECT * FROM ( ON table1.a = table2.a) PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable_n2 -PREHOOK: Input: default@cmv_mat_view2_n0 PREHOOK: Output: hdfs://### HDFS PATH ### POSTHOOK: query: EXPLAIN SELECT * FROM ( @@ -606,7 +598,6 @@ SELECT * FROM ( ON table1.a = table2.a) POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable_n2 
-POSTHOOK: Input: default@cmv_mat_view2_n0 POSTHOOK: Output: hdfs://### HDFS PATH ### STAGE DEPENDENCIES: Stage-1 is a root stage @@ -624,11 +615,11 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_n2 - filterExpr: ((a = 3) and (d = 3)) (type: boolean) - Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + filterExpr: (a = 3) (type: boolean) + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((a = 3) and (d = 3)) (type: boolean) - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + predicate: (a = 3) (type: boolean) + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: c (type: double) outputColumnNames: _col0 @@ -642,19 +633,22 @@ STAGE PLANS: Map 3 Map Operator Tree: TableScan - alias: cmv_mat_view2_n0 - properties: - druid.fieldNames a,c - druid.fieldTypes int,double - druid.query.json {"queryType":"scan","dataSource":"default.cmv_mat_view2_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["a","c"],"resultFormat":"compactedList"} - druid.query.type scan - Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE - value expressions: a (type: int), c (type: double) + alias: cmv_basetable_n2 + filterExpr: ((d = 3) and (a = 3)) (type: boolean) + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: ((d = 3) and (a = 3)) (type: boolean) + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: c (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: double) Execution mode: vectorized, llap - LLAP IO: no inputs + LLAP IO: may be used (ACID table) Reducer 2 Execution mode: llap Reduce Operator Tree: @@ -664,15 +658,15 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 63 Basic stats: COMPLETE Column stats: NONE + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), _col1 (type: double), _col0 (type: int), _col2 (type: double) + expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 63 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -684,7 +678,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join MERGEJOIN[10][tables = [cmv_mat_view2_n0, $hdt$_0]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' 
is a cross product PREHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN @@ -692,7 +686,6 @@ PREHOOK: query: SELECT * FROM ( ON table1.a = table2.a) PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable_n2 -PREHOOK: Input: default@cmv_mat_view2_n0 PREHOOK: Output: hdfs://### HDFS PATH ### POSTHOOK: query: SELECT * FROM ( (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 @@ -701,7 +694,6 @@ POSTHOOK: query: SELECT * FROM ( ON table1.a = table2.a) POSTHOOK: type: QUERY POSTHOOK: Input: default@cmv_basetable_n2 -POSTHOOK: Input: default@cmv_mat_view2_n0 POSTHOOK: Output: hdfs://### HDFS PATH ### 3 15.8 3 978.76 3 15.8 3 978.76 diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out index 7aa16d35a2..ff3bda628c 100644 --- a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out +++ b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out @@ -109,8 +109,8 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:default src -FAILED: SemanticException [Error 10001]: Table not found encrypted_table_n1 -FAILED: SemanticException [Error 10001]: Table not found encrypted_table_n1 +FAILED: SemanticException [Error 10001]: Table not found default.encrypted_table_n1 +FAILED: SemanticException [Error 10001]: Table not found default.encrypted_table_n1 FAILED: SemanticException [Error 10001]: Table not found default.encrypted_table_n1 PREHOOK: query: SHOW TABLES PREHOOK: type: SHOWTABLES diff --git a/ql/src/test/results/clientpositive/explain_ddl.q.out b/ql/src/test/results/clientpositive/explain_ddl.q.out index e0ec1c54cc..249a02635d 100644 --- a/ql/src/test/results/clientpositive/explain_ddl.q.out +++ b/ql/src/test/results/clientpositive/explain_ddl.q.out @@ -795,7 +795,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Truncate Table or Partition - table name: M1 + table name: default.m1 PREHOOK: query: select count(*) from M1 where key > 0 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/input1.q.out b/ql/src/test/results/clientpositive/input1.q.out index 623b973037..2c41c6e8a1 100644 --- a/ql/src/test/results/clientpositive/input1.q.out +++ b/ql/src/test/results/clientpositive/input1.q.out @@ -21,7 +21,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Describe Table - table: TEST1_n6 + table: default.TEST1_n6 Stage: Stage-1 Fetch Operator diff --git a/ql/src/test/results/clientpositive/input10.q.out b/ql/src/test/results/clientpositive/input10.q.out index 977cf333a2..48af674852 100644 --- a/ql/src/test/results/clientpositive/input10.q.out +++ b/ql/src/test/results/clientpositive/input10.q.out @@ -21,7 +21,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Describe Table - table: TEST10 + table: default.TEST10 Stage: Stage-1 Fetch Operator diff --git a/ql/src/test/results/clientpositive/input3.q.out b/ql/src/test/results/clientpositive/input3.q.out index 3d82be7716..0365ff25ba 100644 --- a/ql/src/test/results/clientpositive/input3.q.out +++ b/ql/src/test/results/clientpositive/input3.q.out @@ -55,8 +55,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Add Columns - new columns: x double table name: default.TEST3b + new columns: x double PREHOOK: query: ALTER TABLE TEST3b ADD COLUMNS (X DOUBLE) PREHOOK: type: ALTERTABLE_ADDCOLS @@ -92,8 +92,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Rename Table - new table name: default.TEST3c table name: default.TEST3b + 
new table name: default.TEST3c PREHOOK: query: ALTER TABLE TEST3b RENAME TO TEST3c PREHOOK: type: ALTERTABLE_RENAME @@ -138,8 +138,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Replace Columns - new columns: r1 int, r2 double table name: default.TEST3c + new columns: r1 int, r2 double PREHOOK: query: ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE) PREHOOK: type: ALTERTABLE_REPLACECOLS diff --git a/ql/src/test/results/clientpositive/inputddl6.q.out b/ql/src/test/results/clientpositive/inputddl6.q.out index 155c4b3264..d864c82424 100644 --- a/ql/src/test/results/clientpositive/inputddl6.q.out +++ b/ql/src/test/results/clientpositive/inputddl6.q.out @@ -92,9 +92,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Describe Table + table: default.INPUTDDL6 partition: ds 2008-04-09 - table: INPUTDDL6 extended: true Stage: Stage-1 diff --git a/ql/src/test/results/clientpositive/rename_column.q.out b/ql/src/test/results/clientpositive/rename_column.q.out index 0783f70a5a..15816bbf98 100644 --- a/ql/src/test/results/clientpositive/rename_column.q.out +++ b/ql/src/test/results/clientpositive/rename_column.q.out @@ -63,10 +63,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Change Column + table name: default.kv_rename_test new column name: a2 new column type: int old column name: a1 - table name: default.kv_rename_test first: true PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT FIRST @@ -101,10 +101,10 @@ STAGE PLANS: Stage: Stage-0 Change Column after column: b + table name: default.kv_rename_test new column name: a new column type: int old column name: a2 - table name: default.kv_rename_test PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a2 a INT AFTER b PREHOOK: type: ALTERTABLE_RENAMECOL @@ -154,11 +154,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Change Column + table name: default.kv_rename_test new column comment: test comment2 new column name: a2 new column type: int old column name: a1 - table name: default.kv_rename_test first: true PREHOOK: query: ALTER TABLE kv_rename_test CHANGE a1 a2 INT COMMENT 'test comment2' FIRST diff --git a/ql/src/test/results/clientpositive/set_tblproperties.q.out b/ql/src/test/results/clientpositive/set_tblproperties.q.out index 3503ea5514..7003274e82 100644 --- a/ql/src/test/results/clientpositive/set_tblproperties.q.out +++ b/ql/src/test/results/clientpositive/set_tblproperties.q.out @@ -20,10 +20,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Set Properties + table name: default.t properties: a x b y - table name: default.t PREHOOK: query: ALTER TABLE t SET TBLPROPERTIES('a'='x', 'b'='y') PREHOOK: type: ALTERTABLE_PROPERTIES @@ -68,10 +68,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Unset Properties + table name: default.t properties: a b - table name: default.t PREHOOK: query: ALTER TABLE t UNSET TBLPROPERTIES('a', 'b') PREHOOK: type: ALTERTABLE_PROPERTIES diff --git a/ql/src/test/results/clientpositive/table_set_owner.q.out b/ql/src/test/results/clientpositive/table_set_owner.q.out index 8528faf245..023cceed97 100644 --- a/ql/src/test/results/clientpositive/table_set_owner.q.out +++ b/ql/src/test/results/clientpositive/table_set_owner.q.out @@ -15,10 +15,11 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 +#### A masked pattern was here #### + table name: default.t #### A masked pattern was here #### Principal name: user1 - table name: default.t PREHOOK: query: ALTER TABLE t SET OWNER USER user1 PREHOOK: type: ALTERTABLE_OWNER diff --git a/ql/src/test/results/clientpositive/table_storage.q.out 
b/ql/src/test/results/clientpositive/table_storage.q.out index 7de66e6a41..9837d30e2b 100644 --- a/ql/src/test/results/clientpositive/table_storage.q.out +++ b/ql/src/test/results/clientpositive/table_storage.q.out @@ -41,9 +41,9 @@ STAGE PLANS: Stage: Stage-0 Clustered By bucket columns: key + table name: default.t number of buckets: 2 sort columns: key ASC - table name: default.t PREHOOK: query: ALTER TABLE t CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT @@ -90,8 +90,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Into Buckets - number of buckets: 3 table name: default.t + number of buckets: 3 PREHOOK: query: ALTER TABLE t INTO 3 BUCKETS PREHOOK: type: ALTERTABLE_BUCKETNUM @@ -230,9 +230,9 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Skewed By + table name: default.t skewedColumnNames: key skewedColumnValues: [a], [b] - table name: default.t isStoredAsDirectories: true PREHOOK: query: ALTER TABLE t SKEWED BY (key) ON (("a"), ("b")) STORED AS DIRECTORIES @@ -281,6 +281,7 @@ STAGE PLANS: Stage: Stage-0 #### A masked pattern was here #### table name: default.t +#### A masked pattern was here #### #### A masked pattern was here #### PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION @@ -372,10 +373,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Set File Format + table name: default.t input format: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat output format: org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat serde name: org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe - table name: default.t PREHOOK: query: ALTER TABLE t SET FILEFORMAT parquet PREHOOK: type: ALTERTABLE_FILEFORMAT @@ -420,6 +421,7 @@ STAGE PLANS: Stage: Stage-0 #### A masked pattern was here #### table name: default.t +#### A masked pattern was here #### #### A masked pattern was here #### PREHOOK: type: ALTERTABLE_LOCATION @@ -464,8 +466,8 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Set Serde - serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe table name: default.t + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe PREHOOK: query: ALTER TABLE t SET SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" PREHOOK: type: ALTERTABLE_SERIALIZER @@ -509,10 +511,10 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Set Serde Props + table name: default.t properties: property1 value1 property2 value2 - table name: default.t PREHOOK: query: ALTER TABLE t SET SERDEPROPERTIES('property1'='value1', 'property2'='value2') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES diff --git a/ql/src/test/results/clientpositive/temp_table_truncate.q.out b/ql/src/test/results/clientpositive/temp_table_truncate.q.out index 0fa4f968ed..20aeafc2db 100644 --- a/ql/src/test/results/clientpositive/temp_table_truncate.q.out +++ b/ql/src/test/results/clientpositive/temp_table_truncate.q.out @@ -82,7 +82,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Truncate Table or Partition - table name: tmp_src + table name: default.tmp_src PREHOOK: query: TRUNCATE TABLE tmp_src PREHOOK: type: TRUNCATETABLE @@ -111,7 +111,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Truncate Table or Partition - table name: tmp_srcpart + table name: default.tmp_srcpart PREHOOK: query: TRUNCATE TABLE tmp_srcpart PREHOOK: type: TRUNCATETABLE diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out index 2ef7b133f8..deab69b40f 100644 --- 
a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out +++ b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out @@ -174,7 +174,7 @@ POSTHOOK: type: ALTERTABLE_RENAME POSTHOOK: Input: newdb@tab_n2 POSTHOOK: Output: newdb@tab_n2 Stage-0 - Rename Table{"new table name:":"newDB.newName","table name:":"newDB.tab_n2"} + Rename Table{"table name:":"newDB.tab_n2","new table name:":"newDB.newName"} PREHOOK: query: drop table tab_n2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out index fa85521be4..2963a5357d 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out @@ -178,7 +178,7 @@ POSTHOOK: type: ALTERTABLE_RENAME POSTHOOK: Input: newdb@tab_n1 POSTHOOK: Output: newdb@tab_n1 Stage-0 - Rename Table{"new table name:":"newDB.newName","table name:":"newDB.tab_n1"} + Rename Table{"table name:":"newDB.tab_n1","new table name:":"newDB.newName"} PREHOOK: query: explain drop table tab_n1 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/truncate_table.q.out b/ql/src/test/results/clientpositive/truncate_table.q.out index a8b4cab21b..0607bcb68d 100644 --- a/ql/src/test/results/clientpositive/truncate_table.q.out +++ b/ql/src/test/results/clientpositive/truncate_table.q.out @@ -126,7 +126,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Truncate Table or Partition - table name: src_truncate + table name: default.src_truncate PREHOOK: query: TRUNCATE TABLE src_truncate PREHOOK: type: TRUNCATETABLE @@ -166,7 +166,7 @@ STAGE PLANS: partition spec: ds 2008-04-08 hr 11 - table name: srcpart_truncate + table name: default.srcpart_truncate PREHOOK: query: TRUNCATE TABLE srcpart_truncate partition (ds='2008-04-08', hr='11') PREHOOK: type: TRUNCATETABLE @@ -210,7 +210,7 @@ STAGE PLANS: partition spec: ds hr 12 - table name: srcpart_truncate + table name: default.srcpart_truncate PREHOOK: query: TRUNCATE TABLE srcpart_truncate partition (ds, hr='12') PREHOOK: type: TRUNCATETABLE @@ -259,7 +259,7 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-0 Truncate Table or Partition - table name: srcpart_truncate + table name: default.srcpart_truncate PREHOOK: query: TRUNCATE TABLE srcpart_truncate PREHOOK: type: TRUNCATETABLE diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java b/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java index f5cb192561..a0f47c2ebf 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java @@ -17,34 +17,45 @@ */ package org.apache.hadoop.hive.common; +import java.io.Serializable; +import java.util.Objects; + /** * A container for a fully qualified table name, i.e. catalogname.databasename.tablename. Also * includes utilities for string parsing. */ -public class TableName { +public class TableName implements Serializable { + + private static final long serialVersionUID = 1L; + + /** Exception message thrown. */ + private static final String ILL_ARG_EXCEPTION_MSG = + "Table name must be either <tablename>, <dbname>.<tablename> " + "or <catname>.<dbname>.<tablename>"; + + /** Names of the related DB objects. */ private final String cat; private final String db; private final String table; /** * - * @param cat catalog name. Cannot be null. If you do not know it you can get it from + * @param catName catalog name. Cannot be null.
If you do not know it you can get it from * SessionState.getCurrentCatalog() if you want to use the catalog from the current * session, or from MetaStoreUtils.getDefaultCatalog() if you do not have a session * or want to use the default catalog for the Hive instance. - * @param db database name. Cannot be null. If you do not know it you can get it from + * @param dbName database name. Cannot be null. If you do not know it you can get it from * SessionState.getCurrentDatabase() or use Warehouse.DEFAULT_DATABASE_NAME. - * @param table table name, cannot be null + * @param tableName table name, cannot be null - public TableName(String cat, String db, String table) { - this.cat = cat; - this.db = db; - this.table = table; + public TableName(final String catName, final String dbName, final String tableName) { + this.cat = catName; + this.db = dbName; + this.table = tableName; } /** * Build a TableName from a string of the form [[catalog.]database.]table. - * @param name name in string form + * @param name name in string form, not null * @param defaultCatalog default catalog to use if catalog is not in the name. If you do not * know it you can get it from SessionState.getCurrentCatalog() if you * want to use the catalog from the current session, or from @@ -54,17 +65,21 @@ public TableName(String cat, String db, String table) { * not know it you can get it from SessionState.getCurrentDatabase() or * use Warehouse.DEFAULT_DATABASE_NAME. * @return TableName + * @throws IllegalArgumentException if a null or malformed name is given */ - public static TableName fromString(String name, String defaultCatalog, String defaultDatabase) { + public static TableName fromString(final String name, final String defaultCatalog, final String defaultDatabase) + throws IllegalArgumentException { + if (name == null) { + throw new IllegalArgumentException(String.join("", "Table value was null. ", ILL_ARG_EXCEPTION_MSG)); + } if (name.contains(DatabaseName.CAT_DB_TABLE_SEPARATOR)) { - String names[] = name.split("\\."); + String[] names = name.split("\\."); if (names.length == 2) { return new TableName(defaultCatalog, names[0], names[1]); } else if (names.length == 3) { return new TableName(names[0], names[1], names[2]); } else { - throw new RuntimeException("Table name must be either <tablename>, <dbname>.<tablename> " + - "or <catname>.<dbname>.<tablename>"); + throw new IllegalArgumentException(ILL_ARG_EXCEPTION_MSG); } } else { @@ -86,10 +101,28 @@ public String getTable() { /** * Get the name in db.table format, for use with stuff not yet converted to use the catalog. + * Fair warning: if the db is null, this will return null.tableName + * @deprecated use {@link #getNotEmptyDbTable()} instead. */ + // to be @Deprecated public String getDbTable() { return db + DatabaseName.CAT_DB_TABLE_SEPARATOR + table; + } + + /** + * Get the name in `db`.`table` escaped format, if db is not empty, otherwise just the escaped table name. + */ + public String getEscapedNotEmptyDbTable() { + return + db == null || db.trim().isEmpty() ? + "`" + table + "`" : "`" + db + "`" + DatabaseName.CAT_DB_TABLE_SEPARATOR + "`" + table + "`"; + } + /** + * Get the name in db.table format, if db is not empty, otherwise just the table name. + */ + public String getNotEmptyDbTable() { + return db == null || db.trim().isEmpty() ?
table : db + DatabaseName.CAT_DB_TABLE_SEPARATOR + table; } /** @@ -97,25 +130,26 @@ public String getDbTable() { */ public static String getDbTable(String dbName, String tableName) { return dbName + DatabaseName.CAT_DB_TABLE_SEPARATOR + tableName; - } public static String getQualified(String catName, String dbName, String tableName) { return catName + DatabaseName.CAT_DB_TABLE_SEPARATOR + dbName + DatabaseName.CAT_DB_TABLE_SEPARATOR + tableName; } - @Override - public int hashCode() { - return (cat.hashCode() * 31 + db.hashCode()) * 31 + table.hashCode(); + @Override public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + TableName tableName = (TableName) o; + return Objects.equals(cat, tableName.cat) && Objects.equals(db, tableName.db) && Objects + .equals(table, tableName.table); } - @Override - public boolean equals(Object obj) { - if (obj != null && obj instanceof TableName) { - TableName that = (TableName)obj; - return table.equals(that.table) && db.equals(that.db) && cat.equals(that.cat); - } - return false; + @Override public int hashCode() { + return Objects.hash(cat, db, table); } @Override diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java index 0a8cb2a82e..f19c7358c9 100644 --- a/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java +++ b/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java @@ -22,7 +22,7 @@ public class TestTableName { @Test - public void fullname() { + public void fullName() { TableName name = new TableName("cat", "db", "t"); Assert.assertEquals("cat", name.getCat()); Assert.assertEquals("db", name.getDb()); @@ -47,5 +47,24 @@ public void fromString() { Assert.assertEquals("cat", name.getCat()); Assert.assertEquals("db", name.getDb()); Assert.assertEquals("tab", name.getTable()); + + try { + TableName.fromString(null, null, null); + Assert.fail("Name can't be null"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(true); + } + } + + @Test + public void testNotEmptyDbTable() { + TableName name = new TableName("cat", "db", "t"); + Assert.assertEquals("db.t", name.getNotEmptyDbTable()); + + name = new TableName("cat", null, "t"); + Assert.assertEquals("t", name.getNotEmptyDbTable()); + + name = new TableName("cat", "", "t"); + Assert.assertEquals("t", name.getNotEmptyDbTable()); } }
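Reviewer note, not part of the patch: a minimal usage sketch of the TableName API introduced above, showing how fromString, getNotEmptyDbTable, getEscapedNotEmptyDbTable and the null check behave. It assumes DatabaseName.CAT_DB_TABLE_SEPARATOR is "." (as the split on "\\." in fromString suggests) and uses "hive" and "default" as placeholder default catalog and database; everything here other than the TableName API itself is illustrative only.

import org.apache.hadoop.hive.common.TableName;

// Illustrative sketch only; not part of HIVE-21198.
public class TableNameUsageSketch {
  public static void main(String[] args) {
    // Bare table name: the supplied defaults fill in catalog and database.
    TableName plain = TableName.fromString("src", "hive", "default");
    System.out.println(plain.getNotEmptyDbTable());        // default.src
    System.out.println(plain.getEscapedNotEmptyDbTable()); // `default`.`src`

    // db-qualified name: only the default catalog is applied.
    TableName qualified = TableName.fromString("target.srcpart", "hive", "default");
    System.out.println(TableName.getDbTable(qualified.getDb(), qualified.getTable())); // target.srcpart

    // A null name now fails fast with IllegalArgumentException rather than a generic RuntimeException.
    try {
      TableName.fromString(null, "hive", "default");
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}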